def writefile(self, filename, sha1v):
    file = self.getfile(filename)
    path = self.dir + filename
    # Keep a copy of the old content so a failed verify can roll back.
    if os.path.isfile(path):
        input = FileUtil.open(path, "r")
        oldfile = input.read()
        input.close()
    else:
        oldfile = None
    output = FileUtil.open(path, "wb")
    output.write(file)
    print "Update " + filename + " OK!"
    output.close()
    # Re-read what was written and compare against the expected sha1.
    input = FileUtil.open(path, "rb")
    sha1vv = FileUtil.get_file_sha1(input)
    input.close()
    if sha1v.strip() == sha1vv.strip():
        print "Verify " + filename + " OK!"
    else:
        print "Verify " + filename + " Fail!"
        if oldfile:
            output = FileUtil.open(path, "wb")
            output.write(oldfile)
            output.close()
            print "Recover " + filename + " OK!"
    # Updating the updater's own config requires rewriting and reloading it.
    if filename.strip() == "/autoupdate.ini":
        newconfig = Config(__config__)
        newconfig.writeconfig("autoupdate", "server", common.AUTOUPDATE_SERVER_STR)
        print "ReWrite /autoupdate.ini OK!"
        common.reloadini()
        print "ReLoad /autoupdate.ini OK!"
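# FileUtil.get_file_sha1 is not shown in these snippets; a minimal stand-in
# built only on the standard library might look like the sketch below
# (hypothetical helper, not the project's own code).
import hashlib

def get_file_sha1(fileobj, chunk_size=8192):
    # Hash in fixed-size chunks so large files never load fully into memory.
    digest = hashlib.sha1()
    for chunk in iter(lambda: fileobj.read(chunk_size), b""):
        digest.update(chunk)
    return digest.hexdigest()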
def test_cwe_update_config_and_code(self):
    # Originally this tested the no-update case, but that is tricky to
    # record: any change to the code causes checksum mismatches, which
    # force a function-code update and invalidate both the recorded data
    # and the focus of the test.
    session_factory = self.replay_flight_data('test_cwe_update', zdata=True)
    p = Policy({
        'resource': 's3',
        'name': 's3-bucket-policy',
        'mode': {
            'type': 'cloudtrail',
            'events': ["CreateBucket"],
        },
        'filters': [
            {'type': 'missing-policy-statement',
             'statement_ids': ['RequireEncryptedPutObject']}],
        'actions': ['no-op']
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.addCleanup(mgr.remove, pl)

    p = Policy({
        'resource': 's3',
        'name': 's3-bucket-policy',
        'mode': {
            'type': 'cloudtrail',
            'memory': 256,
            'events': [
                "CreateBucket",
                {'event': 'PutBucketPolicy',
                 'ids': 'requestParameters.bucketName',
                 'source': 's3.amazonaws.com'}]
        },
        'filters': [
            {'type': 'missing-policy-statement',
             'statement_ids': ['RequireEncryptedPutObject']}],
        'actions': ['no-op']
    }, Config.empty())

    output = self.capture_logging('custodian.lambda', level=logging.DEBUG)
    result2 = mgr.publish(PolicyLambda(p), 'Dev', role=self.role)

    lines = output.getvalue().strip().split('\n')
    self.assertTrue(
        'Updating function custodian-s3-bucket-policy code' in lines)
    self.assertTrue(
        'Updating function: custodian-s3-bucket-policy config' in lines)
    self.assertEqual(result['FunctionName'], result2['FunctionName'])

    # drive by coverage
    functions = [i for i in mgr.list_functions()
                 if i['FunctionName'] == 'custodian-s3-bucket-policy']
    self.assertEqual(len(functions), 1)
    start = 0L
    end = long(time.time() * 1000)
    self.assertEqual(list(mgr.logs(pl, start, end)), [])
def logger(env_key='LOGGING_TYPE'):
    logging_type = os.getenv(env_key, 'local')
    log_formatter = logging.Formatter(
        '%(asctime)s [%(levelname)-8s] [%(filename)s:%(lineno)d] %(message)s',
        datefmt='%d/%m/%Y %H:%M:%S')
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)

    info_file_handler = logging.FileHandler(Config().log_file("info.log"))
    info_file_handler.setFormatter(log_formatter)
    info_file_handler.setLevel(logging.INFO)
    root_logger.addHandler(info_file_handler)

    error_file_handler = logging.FileHandler(Config().log_file("error.log"))
    error_file_handler.setFormatter(log_formatter)
    error_file_handler.setLevel(logging.ERROR)
    root_logger.addHandler(error_file_handler)

    if logging_type == 'local':
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        console_handler.setLevel(logging.INFO)
        root_logger.addHandler(console_handler)
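# The function above relies on per-handler level filtering: both file
# handlers share one formatter, but error.log only receives ERROR and above
# while info.log receives INFO and above. A self-contained sketch of the
# same pattern, with plain paths standing in for Config().log_file():
import logging

def setup_logging(info_path="info.log", error_path="error.log"):
    fmt = logging.Formatter(
        "%(asctime)s [%(levelname)-8s] [%(filename)s:%(lineno)d] %(message)s",
        datefmt="%d/%m/%Y %H:%M:%S")
    root = logging.getLogger()
    root.setLevel(logging.INFO)  # root gate: DEBUG records never reach handlers
    for path, level in ((info_path, logging.INFO), (error_path, logging.ERROR)):
        handler = logging.FileHandler(path)
        handler.setFormatter(fmt)
        handler.setLevel(level)  # handler gate: error.log sees only ERROR+
        root.addHandler(handler)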
def __init__(self):
    Config.homeDirectory = JavaSystem.getProperty(
        "user.home") + File.separator + ".FoxTelem"
    Config.currentDir = JavaSystem.getProperty("user.dir")
    Config.initSatelliteManager()
    Config.initPayloadStore()
    Config.initPassManager()
    Config.initSequence()
    Config.initServerQueue()
def get_monitors():
    will_monitor = []
    spiders = Config.get_spiders()
    for monitor in Config.get_monitors():
        if monitor not in spiders:
            print('Cannot find %s in config_huawei.json file.' % monitor)
            exit(1)
        will_monitor.append([monitor, spiders[monitor]])
    return will_monitor
def test_has_section(self):
    config = Config()
    self.assertTrue(config.has_section("general"))
    self.assertTrue(config.has_section("lftp"))
    self.assertTrue(config.has_section("controller"))
    self.assertTrue(config.has_section("web"))
    self.assertTrue(config.has_section("autoqueue"))
    self.assertFalse(config.has_section("nope"))
    self.assertFalse(config.has_section("from_file"))
    self.assertFalse(config.has_section("__init__"))
def RunEpguide():
    """Main application loop: reads the configuration and runs the operations."""
    config = Config()
    config.ParseCommandLine(sys.argv)
    if config.options.use_config:
        config.ReadConfigFile()
    epguide = EpGuide(config)
    epguide.Execute()
def _createListItem(s, l, t):
    value, attrs = t[0]
    if type(value) == dict:
        g = Config.Group(attrs)
        g.update(value)
        return g
    elif type(value) == list:
        l = Config.List(attrs)
        l += value
        return [l]
    else:
        return Config.Item(value, attrs)
def testMerging(self):
    c1 = Config.Config()
    c2 = Config.Config()
    c1.load(mergeConf1)
    c2.load(mergeConf2)
    assert c1.item == "value1"
    c1.merge(c2)
    assert c1.item == "value2"
    assert "item1" in c1.group
    assert "item2" in c1.group
    assert "subitem1" in c1.group.subgroup
    assert "subitem2" in c1.group.subgroup
    assert "group2" in c1
def __init__(self):
    rospy.init_node('Gateway', log_level=rospy.DEBUG)
    server_config_file = rospy.get_param("~server_config_file")
    self.config = Config(server_config_file)
    self.pf = ProtocolFactory(self.config)
    self.run_id = rospy.get_param("run_id")
    print("runid = ", self.run_id)
    self.node_list = rosnode.get_node_names()
    self.timer = Timer()
    self.monitor = Monitor(self.node_list, self.timer)
    self._server_request = {}  # stores server requests
    self._event_bus = Queue()
    self._heartbeat_timeout_job = None
    self._tele_report_job = None
    self._report_car_job = None
    self._report_task_job = None
    self._service = DrivingTaskService(self._event_bus)
    self.__client = Connector(self._event_bus, self.config.host,
                              self.config.port)
    self._handler_map = {}
    self._event_handler_map = {}
    self._add_command_handler()
    self._add_event_handler()
    self._web_server = WebServer(self.monitor, self._service, self.config)
    self._tele_control_service = TeleControlService()
def start_aircast(hostname, port):
    sample_queue = Queue()
    io_loop = tornado.ioloop.IOLoop.current()

    stream_url = "http://{}:{}{}".format(hostname, port, STREAM_ROUTE)
    caster = Caster(stream_url)
    config = Config(sample_rate=44100, channels=2, bits_per_sample=16)
    broadcaster = Broadcaster(config, sample_queue, io_loop)
    shairport = Shairport(caster.device_name, config, sample_queue)
    app = make_app(broadcaster)

    def shairport_status_cb(event, _):
        if event == 'playing':
            caster.start_stream()

    shairport.add_callback(shairport_status_cb)
    broadcaster.start()
    shairport.start()
    app.listen(port)
    logger.info("AirCast ready. Advertising as '%s'", caster.device_name)
    try:
        io_loop.start()
    except KeyboardInterrupt:
        pass
    finally:
        io_loop.stop()
        shairport.stop()
        broadcaster.stop()
        shairport.join(5)
        broadcaster.join(5)
def _create_default_config() -> Config:
    """Create a config with default values."""
    config = Config()
    config.general.debug = False
    config.lftp.remote_address = Seedsync.__CONFIG_DUMMY_VALUE
    config.lftp.remote_username = Seedsync.__CONFIG_DUMMY_VALUE
    config.lftp.remote_port = 22
    config.lftp.remote_path = Seedsync.__CONFIG_DUMMY_VALUE
    config.lftp.local_path = Seedsync.__CONFIG_DUMMY_VALUE
    config.lftp.remote_path_to_scan_script = "/tmp/scanfs"
    config.lftp.num_max_parallel_downloads = 2
    config.lftp.num_max_parallel_files_per_download = 4
    config.lftp.num_max_connections_per_root_file = 4
    config.lftp.num_max_connections_per_dir_file = 4
    config.lftp.num_max_total_connections = 16
    config.controller.interval_ms_remote_scan = 30000
    config.controller.interval_ms_local_scan = 10000
    config.controller.interval_ms_downloading_scan = 1000
    config.controller.extract_path = "/tmp"
    config.controller.use_local_path_as_extract_path = True
    config.web.port = 8800
    config.autoqueue.enabled = True
    config.autoqueue.patterns_only = False
    config.autoqueue.auto_extract = True
    return config
def setUp(self):
    self.logger = logging.getLogger(TestAutoQueue.__name__)
    handler = logging.StreamHandler(sys.stdout)
    self.logger.addHandler(handler)
    self.logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s - %(name)s - %(message)s")
    handler.setFormatter(formatter)

    self.context = MagicMock()
    self.context.config = Config()
    self.context.config.autoqueue.enabled = True
    self.context.config.autoqueue.patterns_only = True
    self.context.config.autoqueue.auto_extract = True
    self.context.logger = self.logger
    self.controller = MagicMock()
    self.controller.get_model_files_and_add_listener = MagicMock()
    self.controller.queue_command = MagicMock()
    self.model_listener = None
    self.initial_model = []

    def get_model():
        return self.initial_model

    def get_model_and_capture_listener(listener: IModelListener):
        self.model_listener = listener
        return get_model()

    self.controller.get_model_files.side_effect = get_model
    self.controller.get_model_files_and_add_listener.side_effect = \
        get_model_and_capture_listener
def test_cwe_schedule(self):
    session_factory = self.replay_flight_data('test_cwe_schedule', zdata=True)
    p = Policy({
        'resource': 'ec2',
        'name': 'periodic-ec2-checker',
        'mode': {
            'type': 'periodic',
            'schedule': 'rate(1 day)'
        }
    }, Config.empty())

    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.assert_items(
        result,
        {'FunctionName': 'custodian-periodic-ec2-checker',
         'Handler': 'custodian_policy.run',
         'MemorySize': 512,
         'Runtime': RUNTIME,
         'Timeout': 60})

    events = session_factory().client('events')
    result = events.list_rules(NamePrefix="custodian-periodic-ec2-checker")
    self.assert_items(
        result['Rules'][0],
        {"State": "ENABLED",
         "ScheduleExpression": "rate(1 day)",
         "Name": "custodian-periodic-ec2-checker"})
def test_consolidate_query_filter(self):
    session_factory = self.replay_flight_data('test_emr_query_ids')
    ctx = Bag(session_factory=session_factory,
              log_dir='',
              options=Config.empty())
    query = {
        'query': [
            {'tag:foo': 'val1'},
            {'tag:foo': 'val2'},
            {'tag:bar': 'val3'},
        ]
    }
    mgr = emr.EMRCluster(ctx, query)
    self.assertEqual(
        mgr.consolidate_query_filter(),
        [{'Values': ['val1', 'val2'], 'Name': 'tag:foo'},
         {'Values': ['val3'], 'Name': 'tag:bar'}])
def test_section_lftp(self):
    config = Config()
    config.lftp.remote_address = "server.remote.com"
    # The username set here must match the value asserted below.
    config.lftp.remote_username = "user-on-remote-server"
    config.lftp.remote_port = 3456
    config.lftp.remote_path = "/remote/server/path"
    config.lftp.local_path = "/local/server/path"
    config.lftp.remote_path_to_scan_script = "/remote/server/path/to/script"
    config.lftp.num_max_parallel_downloads = 6
    config.lftp.num_max_parallel_files_per_download = 7
    config.lftp.num_max_connections_per_root_file = 2
    config.lftp.num_max_connections_per_dir_file = 3
    config.lftp.num_max_total_connections = 4
    out = SerializeConfig.config(config)
    out_dict = json.loads(out)
    self.assertIn("lftp", out_dict)
    self.assertEqual("server.remote.com", out_dict["lftp"]["remote_address"])
    self.assertEqual("user-on-remote-server", out_dict["lftp"]["remote_username"])
    self.assertEqual(3456, out_dict["lftp"]["remote_port"])
    self.assertEqual("/remote/server/path", out_dict["lftp"]["remote_path"])
    self.assertEqual("/local/server/path", out_dict["lftp"]["local_path"])
    self.assertEqual("/remote/server/path/to/script",
                     out_dict["lftp"]["remote_path_to_scan_script"])
    self.assertEqual(6, out_dict["lftp"]["num_max_parallel_downloads"])
    self.assertEqual(7, out_dict["lftp"]["num_max_parallel_files_per_download"])
    self.assertEqual(2, out_dict["lftp"]["num_max_connections_per_root_file"])
    self.assertEqual(3, out_dict["lftp"]["num_max_connections_per_dir_file"])
    self.assertEqual(4, out_dict["lftp"]["num_max_total_connections"])
def parseConfig(config, template=None):
    """
    Parses the given tracer configuration and returns a tree of
    configuration symbols.

    @param config:   Config file text
    @param template: Optional configuration to use as a template.
    @return:         A dictionary tree of configuration values
    """
    if not template:
        template = Config.Config()
    items = template
    for itemName, item in configFile.parseString(config):
        if not item:
            Log.warn("Empty top-level item '%s'." % itemName)
        if itemName in items and isinstance(item, Config.Group):
            items[itemName].update(item)
        elif itemName in items and isinstance(item, Config.List):
            items[itemName] += item
        else:
            items[itemName] = item
    return items
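# A usage sketch for parseConfig, assuming the "name: { items }" syntax seen
# in the test snippets below. Because an existing Config.Group is updated in
# place rather than replaced, keys from both sources survive the merge:
template = Config.Config()
template.load("group: { item1 }")
cfg = parseConfig("group: { item2 }", template)
assert "item1" in cfg.group and "item2" in cfg.group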
def test_cwe_asg_instance(self):
    session_factory = self.replay_flight_data('test_cwe_asg', zdata=True)
    p = Policy({
        'resource': 'asg',
        'name': 'asg-spin-detector',
        'mode': {
            'type': 'asg-instance-state',
            'events': ['launch-failure']}
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.assert_items(
        result,
        {'FunctionName': 'c7n-asg-spin-detector',
         'Handler': 'c7n_policy.run',
         'MemorySize': 512,
         'Runtime': RUNTIME,
         'Timeout': 60})

    events = session_factory().client('events')
    result = events.list_rules(NamePrefix="c7n-asg-spin-detector")
    self.assert_items(
        result['Rules'][0],
        {"State": "ENABLED", "Name": "c7n-asg-spin-detector"})
    self.assertEqual(
        json.loads(result['Rules'][0]['EventPattern']),
        {"source": ["aws.autoscaling"],
         "detail-type": ["EC2 Instance Launch Unsuccessful"]})
    mgr.remove(pl)
def testDuplicateItems(self):
    c = Config.Config()
    try:
        c.load("foo: { dup dup }")
        raise RuntimeError("ValueError not raised.")
    except ValueError:
        pass
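# For comparison, the same check written with unittest's assertRaises
# context manager, which replaces the manual try/except above:
def testDuplicateItemsModern(self):
    c = Config.Config()
    with self.assertRaises(ValueError):
        c.load("foo: { dup dup }")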
def _createGroupItem(s, l, t):
    try:
        name, attrs, value = t[0]
    except ValueError:
        # An empty list is eaten by pyparsing
        name, attrs, value = t[0][0], t[0][1], []
    if type(value) == dict:
        g = Config.Group(attrs)
        g.update(value)
        return name, g
    elif type(value) == list:
        l = Config.List(attrs)
        l += value
        return name, l
    else:
        return name, Config.Item(value, attrs)
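# Both _createListItem and _createGroupItem follow pyparsing's parse-action
# signature (s = input string, l = match location, t = tokens). They would
# be attached to grammar elements roughly like this (the element names here
# are assumptions; only the setParseAction call is pyparsing's real API):
listItem.setParseAction(_createListItem)
groupItem.setParseAction(_createGroupItem)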
def test_cwe_schedule(self):
    session_factory = self.replay_flight_data('test_cwe_schedule', zdata=True)
    p = Policy({
        'resource': 'ec2',
        'name': 'periodic-ec2-checker',
        'mode': {
            'type': 'periodic',
            'schedule': 'rate(1 day)'
        }
    }, Config.empty())

    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.assert_items(
        result,
        {'FunctionName': 'c7n-periodic-ec2-checker',
         'Handler': 'c7n_policy.run',
         'MemorySize': 512,
         'Runtime': RUNTIME,
         'Timeout': 60})

    events = session_factory().client('events')
    result = events.list_rules(NamePrefix="c7n-periodic-ec2-checker")
    self.assert_items(
        result['Rules'][0],
        {"State": "ENABLED",
         "ScheduleExpression": "rate(1 day)",
         "Name": "c7n-periodic-ec2-checker"})
    mgr.remove(pl)
def test_section_general(self):
    config = Config()
    config.general.debug = True
    out = SerializeConfig.config(config)
    out_dict = json.loads(out)
    self.assertIn("general", out_dict)
    self.assertEqual(True, out_dict["general"]["debug"])
def _detect_incomplete_config(config: Config) -> bool:
    config_dict = config.as_dict()
    for sec_name in config_dict:
        for key in config_dict[sec_name]:
            if Seedsync.__CONFIG_DUMMY_VALUE == config_dict[sec_name][key]:
                return True
    return False
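# A sketch of how the two helpers above presumably combine at startup (the
# surrounding Seedsync wiring and the example hostname are assumptions):
# defaults are stamped with the dummy placeholder, and the first-run check
# just looks for any placeholder that was never replaced by the user.
config = Seedsync._create_default_config()
assert Seedsync._detect_incomplete_config(config)   # placeholders still present
config.lftp.remote_address = "seedbox.example.com"  # user fills in real values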
def __init__(self, dataset_name, cls_type="duck"):
    self.config = Config(dataset_name='linemod', cls_type=cls_type)
    self.bs_utils = Basic_Utils(self.config)
    self.dataset_name = dataset_name
    # Per-pixel coordinate maps for a 640x480 image:
    # xmap[j, i] == j (row index), ymap[j, i] == i (column index).
    self.xmap = np.array([[j for i in range(640)] for j in range(480)])
    self.ymap = np.array([[i for i in range(640)] for j in range(480)])
    self.trancolor = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)
    self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.224])
    self.obj_dict = self.config.lm_obj_dict
    self.cls_type = cls_type
    self.cls_id = self.obj_dict[cls_type]
    print("cls_id in lm_dataset.py", self.cls_id)
    self.root = os.path.join(self.config.lm_root, 'Linemod_preprocessed')
    self.cls_root = os.path.join(self.root, "data/%02d/" % self.cls_id)
    self.rng = np.random
    meta_file = open(os.path.join(self.cls_root, 'gt.yml'), "r")
    self.meta_lst = yaml.load(meta_file)
    if dataset_name == 'train':
        self.add_noise = True
        real_img_pth = os.path.join(self.cls_root, "train.txt")
        self.real_lst = self.bs_utils.read_lines(real_img_pth)
        rnd_img_pth = os.path.join(
            self.root, "renders/{}/file_list.txt".format(cls_type))
        try:
            self.rnd_lst = self.bs_utils.read_lines(rnd_img_pth)
        except:  # No synthetic rendered data.
            print("Train without rendered data from "
                  "https://github.com/ethnhe/raster_triangle")
            self.rnd_lst = []
        fuse_img_pth = os.path.join(
            self.root, "fuse/{}/file_list.txt".format(cls_type))
        try:
            self.fuse_lst = self.bs_utils.read_lines(fuse_img_pth)
        except:  # No fuse dataset.
            print("Train without fuse data from "
                  "https://github.com/ethnhe/raster_triangle")
            self.fuse_lst = []
        self.all_lst = self.real_lst + self.rnd_lst + self.fuse_lst
    else:
        self.add_noise = False
        self.pp_data = None
        if os.path.exists(self.config.preprocessed_testset_pth) and \
                self.config.use_preprocess:
            print('Loading valtestset.')
            with open(self.config.preprocessed_testset_pth, 'rb') as f:
                self.pp_data = pkl.load(f)
            self.all_lst = [i for i in range(len(self.pp_data))]
            print('Finish loading valtestset.')
        else:
            tst_img_pth = os.path.join(self.cls_root, "test.txt")
            self.tst_lst = self.bs_utils.read_lines(tst_img_pth)
            self.all_lst = self.tst_lst
    print("{}_dataset_size: ".format(dataset_name), len(self.all_lst))
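# The two nested comprehensions above build the coordinate maps one cell at
# a time; numpy's mgrid produces the same 480x640 arrays in a single
# vectorized call, which is an equivalent (and faster) formulation:
import numpy as np

xmap_alt, ymap_alt = np.mgrid[:480, :640]   # xmap[j, i] == j, ymap[j, i] == i
assert (xmap_alt == np.array([[j for i in range(640)] for j in range(480)])).all()
assert (ymap_alt == np.array([[i for i in range(640)] for j in range(480)])).all()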
def create_blueprint(communicator):
    robot_status = Blueprint('robot_status', __name__)
    zyre_communicator = communicator
    config = Config()

    @robot_status.route('/')
    @robot_status.route('/robot_status')
    def robot_info():
        session['uid'] = uuid.uuid4()
        return render_template('robot_status.html')

    @socketio.on('connect', namespace='/robot_status')
    def on_connect():
        robots = config.get_robots()
        emit('deployed_robots', json.dumps(robots))
        global status_thread
        with status_thread_lock:
            if status_thread is None:
                status_thread = socketio.start_background_task(
                    target=get_robot_status, robot_ids=robots)

    def get_robot_status(robot_ids):
        while True:
            for robot in robot_ids:
                status_msg = zyre_communicator.get_status(robot)
                socketio.emit('status_msg', status_msg,
                              namespace='/robot_status')
                socketio.sleep(0.1)

    return robot_status
def test_cwe_asg_instance(self):
    session_factory = self.replay_flight_data('test_cwe_asg', zdata=True)
    p = Policy({
        'resource': 'asg',
        'name': 'asg-spin-detector',
        'mode': {
            'type': 'asg-instance-state',
            'events': ['launch-failure']}
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.assert_items(
        result,
        {'FunctionName': 'custodian-asg-spin-detector',
         'Handler': 'custodian_policy.run',
         'MemorySize': 512,
         'Runtime': RUNTIME,
         'Timeout': 60})

    events = session_factory().client('events')
    result = events.list_rules(NamePrefix="custodian-asg-spin-detector")
    self.assert_items(
        result['Rules'][0],
        {"State": "ENABLED", "Name": "custodian-asg-spin-detector"})
    self.assertEqual(
        json.loads(result['Rules'][0]['EventPattern']),
        {"source": ["aws.autoscaling"],
         "detail-type": ["EC2 Instance Launch Unsuccessful"]})
def test_section_web(self):
    config = Config()
    config.web.port = 8080
    out = SerializeConfig.config(config)
    out_dict = json.loads(out)
    self.assertIn("web", out_dict)
    self.assertEqual(8080, out_dict["web"]["port"])
def setUp(self):
    # Create a configuration
    self.config = Config.Config()
    self.config["library"] = "testlib"

    # Create a basic library for testing
    self.library = Library.Library("test")
    voidType = Library.Type("void")
    intType = Library.Type("int")

    f = Library.Function("func1", voidType)
    p = Library.Parameter("parameter", intType)
    f.parameters[p.name] = p
    self.library.functions[f.name] = f

    f = Library.Function("func2", intType)
    p1 = Library.Parameter("p1", intType)
    p2 = Library.Parameter("p2", intType)
    f.parameters[p1.name] = p1
    f.parameters[p2.name] = p2
    self.library.functions[f.name] = f

    # Register some types
    self.library.typeMap["int"] = "int"
    self.library.typeMap["void"] = "void"

    # Define platform properties
    self.platform = SymbianPlatform(self.config)
def get_s3_output(self):
    output = S3Output(
        ExecutionContext(
            None,
            Bag(name="xyz"),
            Config.empty(output_dir="s3://cloud-custodian/policies")))
    self.addCleanup(shutil.rmtree, output.root_dir)
    return output
def testEmptyGroupAndList(self):
    c = Config.Config()
    c.load(emptyGroupAndList)
    assert "emptygroup" in c
    assert "emptylist" in c
    assert len(c.emptylist) == 0
    assert len(c.emptygroup) == 0
def testGroups(self):
    c = Config.Config()
    c.load(groupConf)
    assert "group" in c
    assert len(c.group) == 2
    assert c.group["list1"] == ["sublist1", "sublist2"]
    assert c.group["list2"] == ["sublist1", "sublist2"]
def test_section_autoqueue(self):
    config = Config()
    config.autoqueue.enabled = True
    config.autoqueue.patterns_only = False
    out = SerializeConfig.config(config)
    out_dict = json.loads(out)
    self.assertIn("autoqueue", out_dict)
    self.assertEqual(True, out_dict["autoqueue"]["enabled"])
    self.assertEqual(False, out_dict["autoqueue"]["patterns_only"])
def update(self):
    print "Checking for new update..."
    versionfile = self.netopen("/" + common.CONFIG_VERSIONFILE)
    print "Show Server Version Message:"
    print versionfile
    oldsha1 = self.old_file_sha1_ini
    tmp_path = common.CONFIG_SHA1 + ".tmp"
    FileUtil.if_has_file_remove(tmp_path)
    self.getnewsha1(tmp_path, oldsha1)
    newsha1 = Config(tmp_path)
    # Rewrite every file whose recorded sha1 differs from the local table.
    for path, sha1v in newsha1.getsection("FILE_SHA1"):
        if sha1v != oldsha1.getconfig("FILE_SHA1", path):
            path = path.replace("$path$", "").replace("\\", "/")
            self.writefile(path, sha1v)
    # Remove the temporary table by its own name; the loop above reuses
    # `path`, so removing `path` here would delete the wrong file.
    FileUtil.if_has_file_remove(tmp_path)
    print "Finished Update"
    print "Cleaning DIR"
    self.cleandir()
    print "Finished Clean"
def test_get_emr_by_ids(self):
    session_factory = self.replay_flight_data('test_emr_query_ids')
    ctx = Bag(
        session_factory=session_factory,
        log_dir='',
        options=Config.empty())
    mgr = emr.EMRCluster(ctx, {})
    resources = mgr.get_resources(["j-1EJMJNTXC63JW"])
    self.assertEqual(resources[0]['Id'], "j-1EJMJNTXC63JW")
def test_config_rule_provision(self):
    session_factory = self.replay_flight_data('test_config_rule')
    p = Policy({
        'resource': 'security-group',
        'name': 'sg-modified',
        'mode': {'type': 'config-rule'},
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.assertEqual(result['FunctionName'], 'custodian-sg-modified')
    self.addCleanup(mgr.remove, pl)
def makehash(dir, topdown=True):
    try:
        print 'Generating ' + common.CONFIG_SHA1 + ' table...'
        FileUtil.if_has_file_remove(common.CONFIG_SHA1)
        sha1 = Config(common.CONFIG_SHA1)
        for root, dirs, files in os.walk(dir, topdown):
            for name in files:
                path = os.path.join(root, name)
                newpath = path.replace(dir, '$path$')
                regexpath = path.replace(dir, '.')
                # Skip paths matching the configured exclusion prefixes/suffixes.
                if regexpath.startswith(common.REGEX_START) or \
                        regexpath.endswith(common.REGEX_END):
                    continue
                sha1v = FileUtil.sumfile(path)
                sha1.writeconfig('FILE_SHA1', newpath, sha1v)
        print 'DONE generate ' + common.CONFIG_SHA1 + ' table!'
        return sha1
    except Exception as e:
        print 'FAIL to generate ' + common.CONFIG_SHA1 + ' table!'
        print e
        sys.exit()
def test_consolidate_query_filter(self):
    session_factory = self.replay_flight_data('test_emr_query_ids')
    ctx = Bag(
        session_factory=session_factory,
        log_dir='',
        options=Config.empty())
    query = {
        'query': [
            {'tag:foo': 'val1'},
            {'tag:foo': 'val2'},
            {'tag:bar': 'val3'},
        ]
    }
    mgr = emr.EMRCluster(ctx, query)
    self.assertEqual(
        mgr.consolidate_query_filter(),
        [
            {'Values': ['val1', 'val2'], 'Name': 'tag:foo'},
            {'Values': ['val3'], 'Name': 'tag:bar'},
            # default query
            {
                'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'],
                'Name': 'ClusterStates',
            },
        ]
    )

    query = {
        'query': [
            {'tag:foo': 'val1'},
            {'tag:foo': 'val2'},
            {'tag:bar': 'val3'},
            {'ClusterStates': 'terminated'},
        ]
    }
    mgr = emr.EMRCluster(ctx, query)
    self.assertEqual(
        mgr.consolidate_query_filter(),
        [
            {'Values': ['val1', 'val2'], 'Name': 'tag:foo'},
            {'Values': ['val3'], 'Name': 'tag:bar'},
            # verify default is overridden
            {
                'Values': ['terminated'],
                'Name': 'ClusterStates',
            },
        ]
    )
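# The behavior these assertions pin down can be sketched independently of
# c7n: repeated keys collapse into a single filter whose Values list holds
# every value, and an explicit ClusterStates entry replaces the default.
# Hypothetical reimplementation for illustration, not c7n's actual code:
def consolidate(query, default_states=('WAITING', 'RUNNING', 'BOOTSTRAPPING')):
    merged = {}
    for term in query:
        for name, value in term.items():
            merged.setdefault(name, []).append(value)
    # Only add the default cluster-state filter if none was given explicitly.
    merged.setdefault('ClusterStates', list(default_states))
    return [{'Values': vs, 'Name': n} for n, vs in merged.items()]

assert consolidate([{'tag:foo': 'val1'}, {'tag:foo': 'val2'}]) == [
    {'Values': ['val1', 'val2'], 'Name': 'tag:foo'},
    {'Values': ['WAITING', 'RUNNING', 'BOOTSTRAPPING'], 'Name': 'ClusterStates'}]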
def test_resource_permissions(self):
    self.capture_logging('c7n.cache')
    missing = []
    cfg = Config.empty()
    for k, v in manager.resources.items():
        p = Bag({'name': 'permcheck', 'resource': k})
        ctx = self.get_context(config=cfg, policy=p)
        mgr = v(ctx, p)
        perms = mgr.get_permissions()
        if not perms:
            missing.append(k)

        for n, a in v.action_registry.items():
            p['actions'] = [n]
            perms = a({}, mgr).get_permissions()
            found = bool(perms)
            if not isinstance(perms, (list, tuple, set)):
                found = False
            if not found:
                missing.append("%s.actions.%s" % (k, n))

        for n, f in v.filter_registry.items():
            if n in ('and', 'or', 'not'):
                continue
            p['filters'] = [n]
            perms = f({}, mgr).get_permissions()
            if not isinstance(perms, (tuple, list, set)):
                missing.append("%s.filters.%s" % (k, n))

            # in memory filters
            if n in ('event', 'value', 'tag-count', 'marked-for-op',
                     'offhour', 'onhour', 'age', 'state-age', 'egress',
                     'ingress', 'capacity-delta', 'is-ssl', 'global-grants',
                     'missing-policy-statement', 'missing-statement',
                     'healthcheck-protocol-mismatch', 'image-age',
                     'has-statement', 'instance-age', 'ephemeral',
                     'instance-uptime'):
                continue
            if not perms:
                missing.append("%s.filters.%s" % (k, n))

    if missing:
        self.fail("Missing permissions %d on \n\t%s" % (
            len(missing), "\n\t".join(sorted(missing))))
def prepareImage(self, name, path):
    # The body uses self throughout, so this must be a method taking self.
    path = Config.imagefile(path)
    logging.error('ImageButton prepareImage %s', path)
    if path.endswith(".png"):
        pix = cairo.ImageSurface.create_from_png(path)
        self.is_png = True
    elif path.endswith(".svg"):
        pix = GdkPixbuf.Pixbuf.new_from_file(path)
        self.is_png = False
    self.image[name] = pix
    self.iwidth[name] = pix.get_width()
    self.iwidthDIV2[name] = self.iwidth[name] // 2
    self.iheight[name] = pix.get_height()
    self.iheightDIV2[name] = self.iheight[name] // 2
def test_mu_metrics(self):
    session_factory = self.replay_flight_data('test_mu_metrics')
    p = Policy({
        'name': 's3-bucket-policy',
        'resource': 's3',
        'mode': {
            'type': 'cloudtrail',
            'events': ['CreateBucket'],
        },
        'actions': ['no-op']}, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    end = datetime.utcnow()
    start = end - timedelta(1)
    results = mgr.metrics([pl], start, end, 3600)
    self.assertEqual(
        results, [{'Durations': [], 'Errors': [],
                   'Throttles': [], 'Invocations': []}])
def test_cwe_trail(self):
    session_factory = self.replay_flight_data('test_cwe_trail', zdata=True)
    p = Policy({
        'resource': 's3',
        'name': 's3-bucket-policy',
        'mode': {
            'type': 'cloudtrail',
            'events': ["CreateBucket"],
        },
        'filters': [
            {'type': 'missing-policy-statement',
             'statement_ids': ['RequireEncryptedPutObject']}],
        'actions': ['no-op']
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=self.role)

    events = pl.get_events(session_factory)
    self.assertEqual(len(events), 1)
    event = events.pop()
    self.assertEqual(
        json.loads(event.render_event_pattern()),
        {u'detail': {u'eventName': [u'CreateBucket'],
                     u'eventSource': [u's3.amazonaws.com']},
         u'detail-type': ['AWS API Call via CloudTrail']})

    self.assert_items(
        result,
        {'Description': 'cloud-custodian lambda policy',
         'FunctionName': 'custodian-s3-bucket-policy',
         'Handler': 'custodian_policy.run',
         'MemorySize': 512,
         'Runtime': RUNTIME,
         'Timeout': 60})
    mgr.remove(pl)
def test_cwe_instance(self):
    session_factory = self.replay_flight_data('test_cwe_instance', zdata=True)
    p = Policy({
        'resource': 's3',
        'name': 'ec2-encrypted-vol',
        'mode': {
            'type': 'ec2-instance-state',
            'events': ['pending']}
    }, Config.empty())
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, 'Dev', role=self.role)
    self.assert_items(
        result,
        {'Description': 'cloud-custodian lambda policy',
         'FunctionName': 'c7n-ec2-encrypted-vol',
         'Handler': 'c7n_policy.run',
         'MemorySize': 512,
         'Runtime': RUNTIME,
         'Timeout': 60})

    events = session_factory().client('events')
    result = events.list_rules(NamePrefix="c7n-ec2-encrypted-vol")
    self.assert_items(
        result['Rules'][0],
        {"State": "ENABLED", "Name": "c7n-ec2-encrypted-vol"})
    self.assertEqual(
        json.loads(result['Rules'][0]['EventPattern']),
        {"source": ["aws.ec2"],
         "detail": {"state": ["pending"]},
         "detail-type": ["EC2 Instance State-change Notification"]})
    mgr.remove(pl)
def get_manager(self, data, config=None, session_factory=None):
    ctx = ExecutionContext(
        session_factory,
        Bag({'name': 'test-policy'}),
        config or Config.empty())
    return EC2(ctx, data)
def cleandir(self):
    needclean = Config(common.CONFIG_NEEDCLEAN)
    for path, sha1v in needclean.getsection('NEEDCLEAN'):
        path = path.replace('\\', '/')
        path = path.replace('$path$/', '')
        FileUtil.if_has_file_remove(path, showinfo=True)
from gi.repository import Gtk
import time
import common.Config as Config
from common.Util.ThemeWidgets import *
from common.Util import InstrumentDB
from common.port.scrolledbox import HScrolledBox
import sugar3.graphics.style as style
import logging

INSTRUMENT_SIZE = Config.scale(114)
Tooltips = Config.Tooltips


class InstrumentPanel(Gtk.EventBox):
    def __init__(self, setInstrument=None):
        Gtk.EventBox.__init__(self)
        self._scrolled_window = None
        self.instrumentDB = InstrumentDB.getRef()
        self.setInstrument = setInstrument
        self.playInstrument = None
        self.micRec = None
        self.synthRec = None
        self.rowLen = None
        self.enterMode = False
# limitations under the License.
import unittest

from dateutil.parser import parse as date_parse

from c7n.policy import Policy
from c7n.reports.csvout import Formatter
from common import Config, load_data

EC2_POLICY = Policy(
    {
        'name': 'report-test-ec2',
        'resource': 'ec2',
    },
    Config.empty(),
)
ASG_POLICY = Policy(
    {
        'name': 'report-test-asg',
        'resource': 'asg',
    },
    Config.empty(),
)
ELB_POLICY = Policy(
    {
        'name': 'report-test-elb',
        'resource': 'elb',
    },
    Config.empty(),
)
def test_file_not_found(self):
    self.assertRaises(
        IOError, policy.load, Config.empty(), "/asdf12")