def parse_args():
    usage = """%prog [options] start | stop | restart | status

PyDirectord Copyright (C) 2016 Martin Herrmann
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions; read LICENSE.txt for details."""
    parser = optparse.OptionParser(usage=usage, version="%prog " + __version__)
    parser.add_option("-d", "--debug", action="store_true", dest="debug", default=False,
                      help="don't start as daemon and log verbosely")
    parser.add_option("-f", "--file", dest="config_file", default=external.config_file,
                      help="use this configuration file [default: %default]", metavar="CONFIG")
    (options, args) = parser.parse_args()

    # parse the config file
    global_config, virtuals = config.parse_config(options.config_file)

    # insert PyDirectord version information into global_config
    global_config.version = __version__

    # make some changes depending on the command-line arguments
    if options.debug:
        global_config.supervised = True
        global_config.log_level = logging.DEBUG
    else:
        # determine initial action
        action = args[0] if len(args) >= 1 else None
        if action is None:
            if global_config.supervised:
                pass  # nothing to do, this is fine
            else:
                print("No action specified, terminating...", file=sys.stderr)
                sys.exit(4)
        elif action == "start":
            global_config.initial_action = Action.start
        elif action == "stop":
            global_config.initial_action = Action.stop
        elif action == "restart":
            global_config.initial_action = Action.restart
        elif action == "reload":
            global_config.initial_action = Action.reload
        elif action == "status":
            global_config.initial_action = Action.status
        else:
            print("Unknown action '%s', terminating..." % action, file=sys.stderr)
            sys.exit(4)

    return global_config, virtuals
def test_parse_config_invalid_xml(self):
    xml = """<?xml version="1.0" encoding="UTF-8"?>
    <config>
    </conf>
    """
    filename = _create_xml(xml)
    with self.assertRaisesRegex(RuntimeError, "Invalid XML for .*"):
        config.parse_config(filename)
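The XML tests in this collection call a _create_xml helper that is not shown anywhere here. A minimal sketch of what it presumably does, inferred purely from its call sites (write the snippet to a temporary file and return the path), might look like this:

import tempfile

def _create_xml(xml):
    # Hypothetical helper, inferred from the tests above and below: persist
    # the XML snippet to a temp file so config.parse_config() can read it.
    f = tempfile.NamedTemporaryFile(mode="w", suffix=".xml", delete=False)
    f.write(xml)
    f.close()
    return f.name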
def test_backslash(self):
    # Backslash means line continuation:
    res = config.parse_config(test_backslash_1)
    self.assertEqual(res['x'], 2)

    res = config.parse_config(test_backslash_2)
    self.assertEqual(res['x'], 0)
def test_string_literals(self):
    # test some string definitions.
    res = config.parse_config(test_string_literals_1)
    self.assertTrue(len(res['x']) == 0 and res['x'] == res['y'])

    ### BUG? Single quote ' seems to translate into "!! ###
    res = config.parse_config(test_string_literals_2)
    self.assertTrue(len(res['x']) == 1 and res['x'] == res['y'] and ord(res['x']) == 39)

    res = config.parse_config(test_string_literals_3)
    self.assertTrue(len(res['x']) == 1 and res['x'] == res['y'] and ord(res['x']) == 34)

    res = config.parse_config(test_string_literals_4)
    self.assertTrue(len(res['x']) == 24 and res['x'] == res['y'])

    res = config.parse_config(test_string_literals_5)
    self.assertTrue(len(res['x']) == 24 and res['x'] == res['y'])

    res = config.parse_config(test_string_literals_6)
    self.assertTrue(res['x'] == res['y'])

    res = config.parse_config(test_string_literals_6 + test_string_literals_7)
    self.assertTrue(res['x'] == res['y'])

    res = config.parse_config(test_string_literals_6 + test_string_literals_8)
    self.assertTrue(res['x'] == res['y'])

    res = config.parse_config(test_string_literals_6 + test_string_literals_9)
    self.assertTrue(res['x'] == res['y'])
async def collector() -> None:
    """Collector

    This loop will listen for messages coming from the nodes as responses
    from functions. Once a new message arrives it will get stored in the
    database and/or re-scheduled for further processing.
    """
    context = zmq.Context()
    responses = context.socket(zmq.PULL)
    collector_endpoint = parse_config("queue")['collector_endpoint']
    responses.connect(collector_endpoint)

    daemon_id = uuid.uuid4().hex
    collector_id = cluster.join(daemon_id, 'collector')
    print('Collector {} running...'.format(daemon_id))

    try:
        while True:
            result = responses.recv_json()
            await db.save_result(result)
            logging.info('Saved result for function {} with request {}'.format(
                result['function_id'], result['request_id']))
    except (KeyboardInterrupt, Exception) as error:
        logging.error(error)
    finally:
        responses.close()
        cluster.leave(daemon_id)
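For context, the sending side of this PULL socket is not shown. A minimal sketch of a node-side sender, assuming only the endpoint key and the message fields visible in the collector above (everything else is illustrative):

import zmq

def send_result(collector_endpoint, function_id, request_id, value):
    # Hypothetical producer paired with the PULL socket above. Which side
    # binds depends on the deployment; since the collector connect()s, a
    # broker or the node presumably binds the endpoint.
    context = zmq.Context()
    push = context.socket(zmq.PUSH)
    push.bind(collector_endpoint)
    push.send_json({
        'function_id': function_id,
        'request_id': request_id,
        'value': value,
    })
    push.close()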
def main():
    config = parse_config()
    checkpoint = torch.load(config.save_model + ".pt", map_location=config.device)
    train_data = DataBatchIterator(config=config,
                                   is_train=True,
                                   dataset="train",
                                   batch_size=config.batch_size,
                                   shuffle=True)
    train_data.load()
    vocab = train_data.vocab

    # load the test set
    valid_data = DataBatchIterator(config=config,
                                   is_train=False,
                                   dataset="test",
                                   batch_size=config.batch_size)
    valid_data.set_vocab(vocab)
    valid_data.load()

    # Do training.
    padding_idx = vocab.stoi[PAD]
    train_textcnn_model(checkpoint, train_data, valid_data, padding_idx, config)
def main(): # 读配置文件 config = parse_config() # 载入训练集合 train_data = DataBatchIterator(config=config, is_train=True, dataset="train", batch_size=config.batch_size, shuffle=True) train_data.load() vocab = train_data.vocab #词汇映射表 # 载入测试集合 test_data = DataBatchIterator(config=config, is_train=False, dataset="test", batch_size=config.batch_size) test_data.set_vocab(vocab) test_data.load() # 测试时载入模型 model = torch.load(config.save_model + ".pt", map_location=config.device) print(model) test(model, test_data)
def get_config(return_unparsed=False):
    """Gets config and creates data_dir."""
    config, unparsed = parse_config()

    # If we have unparsed args, print usage and exit
    if len(unparsed) > 0 and not return_unparsed:
        print_usage()
        exit(1)

    def append_data_dir(p):
        return os.path.join(config.data_dir, p)

    # Append data_dir to all filepaths
    config.pre_save_file = append_data_dir(config.pre_save_file)
    config.raw_csv_file = append_data_dir(config.raw_csv_file)
    config.embeddings_model = append_data_dir(config.embeddings_model)
    config.embeddings_file = append_data_dir(config.embeddings_file)

    # Create data_dir if it doesn't exist
    if not os.path.exists(config.data_dir):
        os.makedirs(config.data_dir)

    if return_unparsed:
        return config, unparsed
    return config
def load(config_raw: t.Dict[str, t.Any]) -> bool:
    """
    This function runs on the main process after the module is imported. It
    should parse and validate the configuration and do other basic setup of
    the module. Do not start any threads or long running tasks here, they
    should go in the ``start`` function.
    """
    if not hasattr(sys.modules[__name__], "mqtt"):
        log.error(
            "MQTTany's MQTT module requires 'paho-mqtt' to be installed, "
            "please see the wiki for instructions on how to install requirements"
        )
        return False

    config_data = parse_config(config_raw, CONF_OPTIONS, log)
    del config_raw
    if config_data:
        log.debug("Config loaded successfully")
        config_data[CONF_KEY_CLIENTID] = config_data[CONF_KEY_CLIENTID].format(
            hostname=socket.gethostname())
        config_data[CONF_KEY_TOPIC_ROOT] = config_data[CONF_KEY_TOPIC_ROOT].format(
            hostname=socket.gethostname(),
            client_id=config_data[CONF_KEY_CLIENTID])
        config_data[CONF_KEY_TOPIC_LWT] = (
            f"{config_data[CONF_KEY_TOPIC_ROOT]}/{config_data[CONF_KEY_TOPIC_LWT]}")
        CONFIG.update(config_data)
        del config_data
        return True
    else:
        log.error("Error loading config")
        return False
def main(args):
    print('Configuration file in', args.config_dir)
    config = parse_config(args)

    device = torch.device('cuda')
    config['device'] = device
    print('gpu count', torch.cuda.device_count())

    config['eval_json'] = args.eval_json
    config['eval_pathname'] = os.path.join(config['data_dir'], 'labels',
                                           config['eval_json'])
    config['load_features'] = args.load_features
    config['save_features'] = args.save_features
    config['eval_model'] = args.eval_model
    config['eval_model_esc'] = args.eval_model_esc
    config['eval_by_angle'] = args.eval_by_angle

    tic = time.time()
    if config['eval_model']:
        print('----evaluating feature model...')
        eval_model(config)
    if config['eval_model_esc']:
        print('----evaluating feature model with epipolar soft constraint...')
        eval_model_esc(config)
    if config['eval_by_angle']:
        print('----evaluating by angle differences...')
        eval_by_angle(config)
    toc = time.time()
    print('completed evaluation, time spent', int(toc - tic))
def main():
    # read the config file
    config = parse_config()
    # load the test set
    test_data = DataBatchIterator(config=config,
                                  is_train=False,
                                  dataset="test",
                                  batch_size=config.batch_size)
    test_data.load()

    # load the model
    model = torch.load(config.save_model + ".pt", map_location=config.device)
    model.eval()

    test_data_iter = iter(test_data)
    y_pred = []  # predicted labels
    y_true = []  # true labels
    for idx, batch in enumerate(test_data_iter):
        outputs = model(batch.sent)
        pred_each = torch.max(outputs, 1)[1].numpy().tolist()
        true_each = batch.label.numpy().tolist()
        y_pred = y_pred + pred_each
        y_true = y_true + true_each

    target_names = [
        'news_edu', 'news_finance', 'news_house', 'news_travel', 'news_tech',
        'news_sports', 'news_game', 'news_culture', 'news_car', 'news_story',
        'news_entertainment', 'news_tech', 'news_agriculture', 'news_world',
        'news_stock'
    ]
    print(classification_report(y_true, y_pred, target_names=target_names))
def command(timebook, config, switch, out, at, resume, messages, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    if switch:
        commands.switch.switch(db, switch)
        sheet = switch
    else:
        sheet = db.get_current_sheet()

    timestamp = parse_date_time_or_now(at)

    if out:
        commands.out.out(db, timestamp)

    if db.get_active_info(sheet):
        parser.error("the timesheet is already active")

    message = " ".join(messages)

    most_recent_clockout = db.get_most_recent_clockout(sheet)
    if most_recent_clockout:
        (previous_timestamp, previous_description) = most_recent_clockout
        if timestamp < previous_timestamp:
            parser.error("error: time periods could end up overlapping")
        if resume:
            if message:
                parser.error('"--resume" sets the note, so you cannot also '
                             "supply the message")
            message = previous_description

    _in(db, sheet, timestamp, message)
def command(timebook, config, sheet, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    switch_to_default = False
    current_sheet = db.get_current_sheet()
    if not sheet or sheet == current_sheet:
        switch_to_default = True
    if not sheet:
        sheet = current_sheet

    try:
        confirm = (input('Delete timesheet "%s"? [y/N]: ' % sheet)
                   .strip().lower() == 'y')
    except (KeyboardInterrupt, EOFError):
        confirm = False
    if not confirm:
        print('cancelled')
        return None

    kill(db, sheet)
    if switch_to_default:
        commands.switch.switch(db, 'default')
def configure():
    script_name = os.path.splitext(__file__)[0]
    parser = config.parse_config(script_name)
    # parser.add_argument("--a", help="some option")  # additional args here
    args = parser.parse_args()
    # config.configure_logging(args)
    return args
def test_valid_with_alerts(self):
    xml = """<?xml version="1.0" encoding="UTF-8"?>
    <config>
        <client ip='localhost' username='******' password='******' mail='*****@*****.**' platform='Linux'>
            <alert type="memory" limit="80%" />
            <alert type="cpu" limit="50%" />
        </client>
        <client ip='localhost' username='******' password='******' mail='*****@*****.**' platform='Windows'>
            <alert type="memory" limit="80%" />
            <alert type="cpu" limit="50%" />
        </client>
    </config>
    """
    filename = _create_xml(xml)
    xml_root = config.parse_config(filename)
    clients = config.parse_clients(xml_root)

    self.assertEqual(2, len(clients))
    self.assertEqual('Linux', clients[0]['platform'])
    self.assertEqual('Windows', clients[1]['platform'])
    self.assertEqual({'memory': 80, 'cpu': 50}, clients[0]['alerts'])
def test_parse_config_valid_xml(self):
    xml = """<?xml version="1.0" encoding="UTF-8"?>
    <config></config>
    """
    filename = _create_xml(xml)
    xml_root = config.parse_config(filename)
    self.assertIsInstance(xml_root, ET.Element)
def main(config_file, role_arn, output, verbose):
    """Fetch AWS API Keys using SSO web login"""
    if verbose:
        logger.setLevel(logging.DEBUG)

    # Parse config file
    config = parse_config(config_file)

    config['openid-configuration'] = requests.get(config['well_known_url']).json()
    config['jwks'] = requests.get(config['openid-configuration']['jwks_uri']).json()

    logger.debug('JWKS : {}'.format(config['jwks']))
    logger.debug('Config : {}'.format(config))

    tokens = login(config['openid-configuration']['authorization_endpoint'],
                   config['openid-configuration']['token_endpoint'],
                   config['client_id'],
                   config['scope'])

    logger.debug('ID token : {}'.format(tokens['id_token']))
    id_token_dict = jwt.decode(token=tokens['id_token'],
                               key=config['jwks'],
                               audience=config['client_id'])
    logger.debug('ID token dict : {}'.format(id_token_dict))

    credentials = sts_conn.get_credentials(tokens['id_token'], role_arn=role_arn)
    if not credentials:
        exit(1)
    logger.debug(credentials)

    if output == 'envvar':
        print(get_aws_env_variables(credentials))
def main():
    if len(sys.argv) < 2:
        sys.exit(1)

    update_config = sys.argv[1]
    config = parse_config(update_config)
    print(config.channel)
def validate_model():
    # parse config
    args = parse_args()
    config = parse_config(args.config)
    val_config = merge_configs(config, 'test', vars(args))

    val_dataset = ECO_Dataset(args.model_name.upper(), val_config, mode='test')
    val_loader = paddle.io.DataLoader(val_dataset,
                                      places=paddle.CUDAPlace(0),
                                      batch_size=None,
                                      batch_sampler=None)

    val_model = ECO.GoogLeNet(val_config['MODEL']['num_classes'],
                              val_config['MODEL']['seg_num'],
                              val_config['MODEL']['seglen'],
                              'RGB', 0.00002)
    model_dict = paddle.load(args.save_dir + '/ucf_model_hapi')
    val_model.set_state_dict(model_dict)
    val_model.eval()

    acc_list = []
    for batch_id, data in enumerate(val_loader()):
        img = data[0]
        label = data[1]
        out, acc = val_model(img, label)
        if out is not None:
            acc_list.append(acc.numpy()[0])

    val_model.train()
    return np.mean(acc_list)
def test_get_conf_call(monkeypatch):
    testargs = ["prog", "-u", "http://test", "-p", "1234", "-n", "127.0.0.1"]
    monkeypatch.setattr('sys.argv', testargs)
    testconf = parse_config()
    assert testconf.nb_url == "http://test"
    assert testconf.nb_token == "1234"
    assert testconf.networks == "127.0.0.1"
def main(args):
    # Parse config file
    config = parse_config(args.config_file)

    # Verify the config file and get the Carbon Black Cloud Server list
    output_params, server_list = verify_config(config)

    # Store Forward. Attempt to send messages that have been saved due to a
    # failure to reach the destination
    send_stored_data(output_params)

    logger.info("Found {0} Carbon Black Cloud Servers in config file".format(
        len(server_list)))

    # Iterate through our Carbon Black Cloud Server list
    for server in server_list:
        logger.info("Handling notifications for {0}".format(server.get('server_url')))

        notification_logs = fetch_notification_logs(
            server, output_params['output_format'],
            output_params['policy_action_severity'])

        logger.info("Sending Notifications")
        send_new_data(output_params, notification_logs)
        logger.info("Done Sending Notifications")

        audit_logs = fetch_audit_logs(server, output_params['output_format'])

        logger.info("Sending Audit Logs")
        send_new_data(output_params, audit_logs)
        logger.info("Done Sending Audit Logs")
def getChipArea(exp_config_path, **kwargs):
    exp_path = os.path.expandvars(os.path.expanduser(exp_config_path))
    exp_config = config.parse_config(exp_path)

    batch_size = int(kwargs.get('batch_size', exp_config.model_config.batch_size))
    hidden_dim = int(kwargs.get('hidden_dim', exp_config.model_config.layer_size))
    dp = int(kwargs.get('dp', exp_config.sch_config.dp))
    lp = int(kwargs.get('lp', exp_config.sch_config.lp))
    # kp_type: -1 no kp
    # kp_type:  1 col-row
    # kp_type:  2 row-col
    kp_type = int(kwargs.get('kp_type', -1))
    kp1 = int(kwargs.get('kp1', 1))
    kp2 = int(kwargs.get('kp2', 1))

    tot_mem = getMemUsagePerCore(exp_config,
                                 batch_size=batch_size,
                                 hidden_dim=hidden_dim,
                                 dp=dp,
                                 lp=lp,
                                 kp_type=kp_type,
                                 kp1=kp1,
                                 kp2=kp2)[0]

    stack_capacity = exp_config.tech_config.DRAM.stack_capacity
    area_per_stack = exp_config.tech_config.DRAM.area_per_stack
    node_area_budget = exp_config.area_breakdown.node_area_budget

    # memory area is rounded up to whole DRAM stacks
    mem_area = math.ceil(tot_mem / stack_capacity) * area_per_stack
    # print("Node_Area: {}, Mem_area: {}".format(node_area_budget, mem_area))
    chip_area_budget = node_area_budget - mem_area

    return chip_area_budget
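A hypothetical invocation of the function above, with keyword names taken from its kwargs handling (the config path and all values are illustrative only):

# 'configs/example_exp.yaml' is a placeholder; the real config format is
# whatever config.parse_config() expects in this project.
chip_area = getChipArea('configs/example_exp.yaml',
                        batch_size=64, hidden_dim=4096,
                        dp=4, lp=2, kp_type=1, kp1=2, kp2=2)
print("Remaining chip area budget:", chip_area)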
def validate_model():
    # parse config
    args = parse_args()
    config = parse_config(args.config)
    val_config = merge_configs(config, 'test', vars(args))

    val_reader = KineticsReader(args.model_name.upper(), 'test',
                                val_config).create_reader()

    val_model = ECO.GoogLeNet(val_config['MODEL']['num_classes'],
                              val_config['MODEL']['seg_num'],
                              val_config['MODEL']['seglen'],
                              'RGB')
    model, _ = fluid.dygraph.load_dygraph(args.save_dir + '/ucf_model')
    val_model.load_dict(model)
    val_model.eval()

    acc_list = []
    for batch_id, data in enumerate(val_reader()):
        dy_x_data = np.array([x[0] for x in data]).astype('float32')
        y_data = np.array([[x[1]] for x in data]).astype('int64')

        img = fluid.dygraph.to_variable(dy_x_data)
        label = fluid.dygraph.to_variable(y_data)
        label.stop_gradient = True

        out, acc = val_model(img, label)
        if out is not None:
            acc_list.append(acc.numpy()[0])

    val_model.train()
    return np.mean(acc_list)
def main():
    # read the config file
    config = parse_config()
    # load the training set
    train_data = DataBatchIterator(config=config,
                                   is_train=True,
                                   dataset="train",
                                   batch_size=config.batch_size,
                                   shuffle=True)
    train_data.load()
    vocab = train_data.vocab

    # load the test set
    test_data = DataBatchIterator(config=config,
                                  is_train=False,
                                  dataset="test",
                                  batch_size=config.batch_size)
    test_data.set_vocab(vocab)
    test_data.load()

    # at test time, load the saved checkpoint as the model
    checkpoint = torch.load(config.save_model + ".pt", map_location=config.device)
    model = checkpoint
    # model = build_textcnn_model(
    #     vocab, config, train=True)

    predict, label = test_textcnn_model(model, test_data, config)
    print(classification_report(label, predict))
def main(argv):
    if len(argv) < 7:
        print("Usage: create_build_config.py $PRODUCTNAME $VERSION $BUILDID "
              "$PLATFORM $TARGETDIR $UPDATE_CONFIG")
        sys.exit(1)

    config = parse_config(argv[6])

    data = {'productName': argv[1],
            'version': argv[2],
            'buildNumber': argv[3],
            'updateChannel': config.channel,
            'platform': argv[4]}

    extra_data_files = ['complete_info.json', 'partial_update_info.json']
    if sys.platform != "cygwin":
        extra_data_files.append('complete_lang_info.json')

    for extra_file in extra_data_files:
        extra_file_path = os.path.join(argv[5], extra_file)
        if not os.path.exists(extra_file_path):
            continue
        with open(extra_file_path, "r") as f:
            extra_data = json.load(f)
        data.update(extra_data)

    update_all_url_entries(data, channel=config.channel, platform=argv[4],
                           buildid=argv[3], version=argv[2])

    with open(os.path.join(argv[5], "build_config.json"), "w") as f:
        json.dump(data, f, indent=4)
def main():
    parser = parse_arguments(COMMANDS)
    arguments = parser.parse_args()
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
        LOG.debug(parser)
        LOG.debug(arguments)

    if arguments.action == "help" or (arguments.action is None and not arguments.i):
        parser.print_help()
        exit()
    if arguments.action == "usage":
        parser.print_usage()
        exit()

    configfile = os.path.join(appdir, '.ptp.conf')
    if os.path.exists(configfile):
        config = parse_config(configfile)
    else:
        config = parse_env()

    ptpobj = ptp.PTP(config.get('ptp', {}).get('ApiUser'),
                     config.get('ptp', {}).get('ApiKey'),
                     appdir,
                     logger=LOG)
    try:
        perform_actions(ptpobj, arguments, parser=parser)
    except ConnectionError:
        LOG.critical("Unable to connect to PTP to perform your request. "
                     "Please try again later!")
def main():
    # read the config file
    config = parse_config()
    # load the test set
    test_data = DataBatchIterator(
        config=config,
        is_train=False,
        dataset="test",
        # batch_size=config.batch_size,
    )
    # test_data.set_vocab(vocab)
    test_data.load()

    # load the TextCNN model
    model = torch.load('./results/model.pt')
    # print model info
    print(model)

    # run the test
    accuracy, corrects, size = test_textcnn_model(model, test_data, config)
    # print the results
    print('\nEvaluation - acc: {:.4f}%({}/{}) \n'.format(accuracy, corrects, size))
def load(config_raw: t.Dict[str, t.Any] = {}) -> bool:
    """
    Initializes the module
    """
    conf_options = updateConfOptions(CONF_OPTIONS)
    conf_options.move_to_end("regex:.+")
    config_data = parse_config(config_raw, conf_options, log)
    del config_raw
    if config_data:
        log.debug("Config loaded")
        CONFIG.update(config_data)
        del config_data
        for id in [key for key in CONFIG if isinstance(CONFIG[key], dict)]:
            array_config = CONFIG.pop(id)
            if not validate_id(id):
                log.warn("'%s' is not a valid ID and will be ignored", id)
            else:
                array_object = getArray(id, array_config, log)
                if array_object:
                    arrays[id] = array_object
                    nodes[id] = array_object.get_node()
                else:
                    log.error("Failed to configure LED array '%s'", id)
        return True
    else:
        log.error("Error loading config")
        return False
def main():
    # read the config file
    config = parse_config()
    # load the training set
    train_data = DataBatchIterator(
        config=config,
        is_train=True,
        dataset="train",
        # batch_size=config.batch_size,
        shuffle=True)
    train_data.load()
    vocab = train_data.vocab

    # load the validation set
    valid_data = DataBatchIterator(
        config=config,
        is_train=False,
        dataset="dev",
        # batch_size=config.batch_size,
    )
    valid_data.set_vocab(vocab)
    valid_data.load()

    # build the TextCNN model
    model = build_textcnn_model(vocab, config, train=True)
    print(model)

    # Do training.
    padding_idx = vocab.stoi[PAD]
    train_textcnn_model(model, train_data, valid_data, padding_idx, config)
    torch.save(model, '%s.pt' % (config.save_model))
def serve(config_file):
    print('Starting app')
    config_dict = dict()
    try:
        with open(config_file, "r") as f:
            config_dict = yaml.safe_load(f.read())
    except Exception:
        pass
    cfg = parse_config(config_dict)

    logging.basicConfig(level=getattr(logging, cfg.get('log')['level'].upper()),
                        format=cfg.get('log')['format'])

    repo = BranchesRepository(config=cfg.get('db'))
    sc = cfg.get('server')
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=sc.get('max_workers')))
    branches_grpc.add_BranchesServicer_to_server(BranchesServicer(repo=repo),
                                                 server)
    server.add_insecure_port(sc.get('hostport'))
    print('Listening to ' + sc.get('hostport'))
    server.start()

    try:
        _ONE_DAY_IN_SECONDS = 60 * 60 * 24
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        print('Signal handler, shutting down the server')
        server.stop(0)
def __init__(self):
    config = parse_config()
    self.api = config.get("Weather", "api_key")
    self.zip = config.get("Weather", "zip_code")
    self.url = "http://api.wunderground.com/api/%s" % self.api
    self.forecast = []
    self.location = self.get_location()
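The parse_config() used here evidently returns a configparser-style object. A minimal sketch of a config file these lookups would accept, assuming only the section and key names visible above (the values are placeholders):

import configparser

# Hypothetical contents of the config file read by parse_config();
# the [Weather] section and key names come from the lookups above.
sample = """
[Weather]
api_key = 0123456789abcdef
zip_code = 90210
"""
cfg = configparser.ConfigParser()
cfg.read_string(sample)
assert cfg.get("Weather", "api_key") == "0123456789abcdef"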
def main():
    parser = parse_arguments(COMMANDS)
    arguments = parser.parse_args()
    if arguments.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    configfile = os.path.join(appdir, '.ptp.conf')
    if os.path.exists(configfile):
        config = parse_config(configfile)
    else:
        config = parse_env()

    ptpobj = ptp.PTP(config.get('ptp', {}).get('ApiUser'),
                     config.get('ptp', {}).get('ApiKey'),
                     appdir,
                     logger=LOG)

    clientparser = parse_arguments(CLIENT_COMMANDS, prog=PTPClient.prefix,
                                   add_help=False)
    client = PTPClient(ptpobj, clientparser)
    client.run(config.get('discord', {}).get('token'))
def main():
    # read the config file
    config = parse_config()
    mylog = open('result.log', mode='a', encoding='utf-8')
    # load the test set
    test_data = DataBatchIterator(config=config,
                                  is_train=False,
                                  dataset="test",
                                  batch_size=config.batch_size,
                                  shuffle=True)
    test_data.load()

    # load the TextCNN model
    model = torch.load("results/model.pt")
    # print(model)
    criterion = nn.CrossEntropyLoss(reduction="sum")

    # Do testing.
    loss, precision, recall, f1 = test_textcnn_model(model, test_data,
                                                     criterion, config)
    print("test loss: {0:.2f}, precision: {1:.2f}, recall:{2:.2f}, f1:{3:.2f}"
          .format(loss, precision, recall, f1),
          file=mylog)
    mylog.close()
def command(timebook, config, start, end, billing, money, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    start_timestamp = parse_date_time(start) if start else None
    end_timestamp = parse_date_time(end) if end else None
    full(db, start_timestamp, end_timestamp, billing, money)
def command(timebook, config, simple, notes, sheet, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    if simple and notes:
        parser.error("you cannot specify both --simple and --notes")
    now(db, simple, notes, sheet)
def command(timebook, config, rate, sheet, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    sheet = sheet or db.get_current_sheet()
    if sheet not in db.get_sheet_names():
        parser.error('%s is not a known timesheet' % sheet)
    money(db, sheet, rate)
def main():
    workdir = sys.argv[1]

    updater_path = UpdaterPath(workdir)
    updater_path.ensure_dir_exist()

    mar_name_prefix = sys.argv[2]
    update_config = sys.argv[3]
    platform = sys.argv[4]
    build_id = sys.argv[5]

    current_build_path = updater_path.get_current_build_dir()
    mar_dir = updater_path.get_mar_dir()
    temp_dir = updater_path.get_previous_build_dir()
    update_dir = updater_path.get_update_dir()

    current_build_path = add_single_dir(current_build_path)
    if sys.platform == "cygwin":
        current_build_path = add_single_dir(current_build_path)

    config = parse_config(update_config)

    updates = download_mar_for_update_channel_and_platform(config, platform, temp_dir)

    data = {"partials": []}

    for build, update in updates.items():
        file_name = generate_file_name(build_id, build, mar_name_prefix)
        mar_file = os.path.join(update_dir, file_name)
        subprocess.call([os.path.join(current_dir_path, 'make_incremental_update.sh'),
                         convert_to_native(mar_file),
                         convert_to_native(update["complete"]),
                         convert_to_native(current_build_path)])
        sign_mar_file(update_dir, config, mar_file, mar_name_prefix)

        partial_info = {"file": get_file_info(mar_file, config.base_url),
                        "from": build,
                        "to": build_id,
                        "languages": {}}

        # on Windows we don't use language packs
        if sys.platform != "cygwin":
            for lang, lang_info in update["languages"].items():
                lang_name = generate_lang_file_name(build_id, build,
                                                    mar_name_prefix, lang)

                # write the file into the final directory
                lang_mar_file = os.path.join(update_dir, lang_name)

                # the directory of the old language file is of the form
                # workdir/mar/language/en-US/LibreOffice_<version>_<os>_archive_langpack_<lang>/
                language_dir = add_single_dir(os.path.join(mar_dir, "language", lang))
                subprocess.call([os.path.join(current_dir_path, 'make_incremental_update.sh'),
                                 convert_to_native(lang_mar_file),
                                 convert_to_native(lang_info),
                                 convert_to_native(language_dir)])
                sign_mar_file(update_dir, config, lang_mar_file, mar_name_prefix)

                # add the partial language info
                partial_info["languages"][lang] = get_file_info(lang_mar_file,
                                                                config.base_url)

        data["partials"].append(partial_info)

    with open(os.path.join(update_dir, "partial_update_info.json"), "w") as f:
        json.dump(data, f)
def command(timebook, config, at, sheet, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    sheet = sheet or db.get_current_sheet()
    timestamp = parse_date_time_or_now(at)
    if sheet not in db.get_sheet_names():
        parser.error('%s is not a known timesheet' % sheet)
    end(db, sheet, timestamp)
def command(timebook, config, messages, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    active = db.get_current_active_info()
    if not active:
        parser.error('timesheet not active')
    entry_id = active[0]
    message = ' '.join(messages)
    alter(db, entry_id, message)
def profile(config_file):
    entries = config.parse_config(config_file)
    last_results = read_last_results()

    results = []
    for entry in entries:
        result = Result()
        result.name = entry.name

        last_result = get_result_from_name(result.name, last_results)
        if last_result:
            last_result_time = last_result.time
        else:
            last_result_time = None

        try:
            # time the call over the configured number of rounds
            st = time.time()
            for _ in range(entry.rounds):
                entry.call(*(entry.args[0]), **(entry.args[1]))
            result.time = (time.time() - st) * 1000. / entry.rounds

            if last_result_time:
                result.time_diff_percentage = 100 * (
                    result.time - last_result_time) / last_result_time

            # profile the same call to find the most expensive callees
            pr = cProfile.Profile()
            pr.enable()
            for _ in range(entry.rounds):
                entry.call(*(entry.args[0]), **(entry.args[1]))
            pr.disable()

            ps = pstats.Stats(pr)
            stats_file = tempfile.NamedTemporaryFile()
            ps.dump_stats(stats_file.name)
            s = marshal.load(stats_file.file)
            # pprint.pprint(s)
            ps = stats.parse_stats(s)
            ps = stats.sort_stats_by_total_time(ps)

            ps_filtered = []
            for e in ps:
                if 'Profiler' not in e.name:
                    ps_filtered.append(e)
            result.top_calls_total_time = ps_filtered[:5]
        except Exception as e:
            result.exception = e

        results.append(result)
    return results
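A sketch of how the returned Result objects might be consumed; the field names come from the function above, while the config filename and the use of getattr (Result's defaults are not shown here) are assumptions:

# Hypothetical usage; 'profile_entries.cfg' is a placeholder path.
for result in profile('profile_entries.cfg'):
    if getattr(result, 'exception', None):
        print(result.name, 'failed:', result.exception)
        continue
    line = '{}: {:.3f} ms'.format(result.name, result.time)
    diff = getattr(result, 'time_diff_percentage', None)
    if diff is not None:
        line += ' ({:+.1f}% vs last run)'.format(diff)
    print(line)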
def main():
    product_name = sys.argv[1]
    buildid = sys.argv[2]
    platform = sys.argv[3]
    update_dir = sys.argv[4]
    update_config = sys.argv[5]

    config = parse_config(update_config)
    upload_url = replace_variables_in_string(config.upload_url,
                                             channel=config.channel,
                                             buildid=buildid,
                                             platform=platform)

    target_url, target_dir = upload_url.split(':')
    command = "ssh %s 'mkdir -p %s'" % (target_url, target_dir)
    print(command)
    subprocess.call(command, shell=True)

    for file in os.listdir(update_dir):
        if file.endswith('.mar'):
            subprocess.call(['scp',
                             convert_to_unix(os.path.join(update_dir, file)),
                             upload_url])
def start():
    """Main method for start."""
    _prepare_logging()
    args = _parse_args()
    config = parse_config(args.config_path)
    starter = _create_starter(config)
    starter.create_connection()
    signalers = starter.start_signalers(["EURUSD-OTC"])
    traders = starter.start_traders(["EURUSD-OTC"])

    while True:
        for signaler in signalers:
            signal = signaler.get_signal()
            if signal:
                for trader in traders:
                    if signaler.active == trader.active:
                        trader.trade(signal)
def command(timebook, config, sheet, start, end, billing, format, money, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    sheet = sheet or db.get_current_sheet()
    start_timestamp = parse_date_time(start) if start else None
    end_timestamp = parse_date_time(end) if end else None
    if billing:
        if start or end:
            parser.error('if you specify --billing, you cannot specify a start '
                         'or end')
        billing_time = db.get_billing_start_time(sheet)
        if billing_time:
            start_timestamp = billing_time
    display(db, sheet, start_timestamp, end_timestamp, format, money)
def main(): if len(sys.argv) < 5: print("Usage: create_full_mar_for_languages.py $PRODUCTNAME $WORKDIR $TARGETDIR $TEMPDIR $FILENAMEPREFIX $UPDATE_CONFIG") sys.exit(1) update_config = sys.argv[4] filename_prefix = sys.argv[3] workdir = sys.argv[2] product_name = sys.argv[1] updater_path = UpdaterPath(workdir) target_dir = updater_path.get_update_dir() temp_dir = updater_path.get_language_dir() config = parse_config(update_config) language_pack_dir = os.path.join(workdir, "installation", product_name + "_languagepack", "archive", "install") language_packs = os.listdir(language_pack_dir) lang_infos = [] for language in language_packs: if language == 'log': continue language_dir = os.path.join(language_pack_dir, language) language_file = os.path.join(language_dir, os.listdir(language_dir)[0]) directory = uncompress_file_to_dir(language_file, os.path.join(temp_dir, language)) mar_file_name = make_complete_mar_name(target_dir, filename_prefix, language) subprocess.call([os.path.join(current_dir_path, 'make_full_update.sh'), mar_file_name, directory]) sign_mar_file(target_dir, config, mar_file_name, filename_prefix) lang_infos.append(create_lang_infos(mar_file_name, language, config.base_url)) with open(os.path.join(target_dir, "complete_lang_info.json"), "w") as language_info_file: json.dump({'languages' : lang_infos}, language_info_file, indent=4)
def command(timebook, config, switch, start_time, end_time, messages, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    if switch:
        commands.switch.switch(db, switch)
        sheet = switch
    else:
        sheet = db.get_current_sheet()

    timestamp_in = parse_date_time(start_time)
    timestamp_out = parse_date_time(end_time)

    current_start = db.get_start_time(sheet)
    if current_start:
        if timestamp_out > current_start[1]:
            parser.error('cannot put this entry into the timesheet because '
                         'it may cause overlap with the active timer - '
                         'clock out first')

    message = ' '.join(messages)
    put(db, sheet, timestamp_in, timestamp_out, message)
def main(): if len(sys.argv) < 5: print("Usage: create_full_mar_for_languages.py $PRODUCTNAME $WORKDIR $FILENAMEPREFIX $UPDATE_CONFIG") sys.exit(1) update_config = sys.argv[4] filename_prefix = sys.argv[3] workdir = sys.argv[2] product_name = sys.argv[1] if len(update_config) == 0: print("missing update config") sys.exit(1) update_path = UpdaterPath(workdir) update_path.ensure_dir_exist() target_dir = update_path.get_update_dir() temp_dir = update_path.get_current_build_dir() config = parse_config(update_config) tar_dir = os.path.join(workdir, "installation", product_name, "archive", "install", "en-US") tar_file = os.path.join(tar_dir, os.listdir(tar_dir)[0]) uncompress_dir = uncompress_file_to_dir(tar_file, temp_dir) mar_file = make_complete_mar_name(target_dir, filename_prefix) subprocess.call([os.path.join(current_dir_path, 'make_full_update.sh'), mar_file, uncompress_dir]) sign_mar_file(target_dir, config, mar_file, filename_prefix) file_info = { 'complete' : get_file_info(mar_file, config.base_url) } with open(os.path.join(target_dir, 'complete_info.json'), "w") as complete_info_file: json.dump(file_info, complete_info_file, indent = 4)
import subprocess
import time
import datetime
import os

from config import parse_config


def change_background():
    hour = time.localtime().tm_hour  # get current hour
    picture = os.path.join(path, str(hour) + ".jpg")  # get the corresponding picture
    background = "file://" + picture
    # change the picture
    subprocess.call(['gsettings', 'set', 'org.gnome.desktop.background',
                     'picture-uri', background])


if __name__ == "__main__":
    config = parse_config()
    path = config.get("Settings", "image_folder")
    # main loop
    while True:
        change_background()
        # calculate how long to wait until the next full hour
        wait = 60 - datetime.datetime.today().time().minute
        time.sleep(wait * 60)
def command(timebook, config, sheet, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    switch(db, sheet)
def command(timebook, config, **kwargs):
    # get the db
    cfg = parse_config(config)
    db = Database(timebook, cfg)

    backend(timebook)
def test_additive_ops(self):
    res = config.parse_config(test_additive_ops_1)
def test_multiplicative_ops(self):
    res = config.parse_config(test_multiplicative_ops_1)
#!/usr/bin/env python
from bot import MarvinBot
from tui import MarvinTUI
import threading
import config

# see http://www.devshed.com/c/a/Python/IRC-on-a-Higher-Level/1/

if __name__ == '__main__':
    args = config.parse_options()
    conf = config.parse_config(args)

    all_joined = threading.Event()
    tui = MarvinTUI(conf)
    bot = MarvinBot(conf, tui, all_joined)
    bot.start()
    all_joined.wait()
    try:
        tui.cmdloop()
    except KeyboardInterrupt:
        tui.do_quit()
def test_stmt_suite(self):
    res = config.parse_config(test_stmt_suite_1)
def test_atoms(self):
    res = config.parse_config(test_atoms_1)
def test_unary_ops(self):
    res = config.parse_config(test_unary_ops_1)
def test_shift_ops(self):
    res = config.parse_config(test_shift_ops_1)
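These smoke tests reference module-level fixtures (test_additive_ops_1, test_shift_ops_1, and so on) that are not shown here. Judging by the backslash and string-literal tests earlier, parse_config evaluates a small Python-like config language into a dict; purely illustrative fixtures might read:

# Hypothetical fixtures; the real ones live alongside the test module and
# their exact contents are not shown in this collection.
test_additive_ops_1 = "x = 1 + 2 - 3\n"
test_shift_ops_1 = "x = 1 << 4 >> 2\n"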