def activate(self): conn = util.as_conn() conn = boto.ec2.autoscale.connect_to_region('us-west-2') name = self.name() # check if this LC already exists if self.exists(): if not util.confirm("LaunchConfig {} already exists, overwrite?".format(name)): return True # delete existing conn.delete_launch_configuration(name) # get configuration for this LC cfg = self.role_config lc = LaunchConfiguration( name = name, image_id = cfg.get('ami'), instance_profile_name = cfg.get('iam_profile'), instance_type = cfg.get('instance_type'), security_groups = cfg.get('security_groups'), key_name = cfg.get('keypair_name'), user_data = self.cloud_init_script(), associate_public_ip_address = True, # this is required for your shit to actually work ) if not conn.create_launch_configuration(lc): print "Error creating LaunchConfig {}".format(name) return False util.message_integrations("Activated LaunchConfig {}".format(name)) return lc
def copy_template_file(fips_dir, proj_dir, filename, values, silent=False):
    """copy a template file from fips/templates to the project directory
    and replace template values (e.g. the project name), ask for user
    permission if files exist

    :param fips_dir:    absolute fips directory
    :param proj_dir:    absolute project directory
    :param filename:    filename to copy from fips/templates
    :param values:      template key/value dictionary
    :param silent:      if True, overwrite existing file and don't print status
    :returns:           True file overwritten, False on not overwritten
    """
    src_path = fips_dir + '/templates/' + filename
    dst_path = proj_dir + '/' + filename
    if not os.path.isfile(src_path):
        log.error("template src file '{}' doesn't exist".format(src_path))
        # BUG FIX: bail out instead of falling through and crashing on the
        # open() of a nonexistent file below
        return False
    if not silent:
        if os.path.isfile(dst_path):
            if not util.confirm("overwrite '{}'?".format(dst_path)):
                log.info("skipping '{}'".format(dst_path))
                return False
    content = None
    with open(src_path, 'r') as f:
        content = f.read()
    # substitute $key placeholders with entries from `values`
    content = Template(content).substitute(values)
    with open(dst_path, 'w') as f:
        f.write(content)
    if not silent:
        log.info("wrote '{}'".format(dst_path))
    return True
def scale(self, desired):
    """Set this ASgroup's DesiredCapacity to *desired* (boto3 client API).

    Refuses to scale below MinSize.  Scaling above MaxSize asks the user
    for confirmation and raises MaxSize first.  Notifies integrations
    when the desired capacity actually changed.
    """
    debug("In asgroup.y scale")
    asgroup = self.get_asgroup()
    client = util.as_conn()
    asg_name = self.name()
    if desired < asgroup['MinSize']:
        print "Cannot scale: {} is lower than MinSize ({})".format(desired, asgroup['MinSize'])
        return
    if desired > asgroup['MaxSize']:
        print "Cannot scale: {} is greater than MaxSize ({})".format(desired, asgroup['MaxSize'])
        if not util.confirm("Increase MaxSize to {}?".format(desired)):
            return
        # user approved: raise MaxSize so the new desired capacity is legal
        asgroup['MaxSize'] = desired
        client.update_auto_scaling_group(
            AutoScalingGroupName = asg_name,
            MaxSize = desired
        )
    current = asgroup['DesiredCapacity']
    # Set DesiredCapacity
    response = client.set_desired_capacity(
        AutoScalingGroupName = asg_name,
        DesiredCapacity = desired
    )
    # Check if DesiredCapacity was changed
    debug("in asgroup.py scale: running 'asgroup = self.get_asgroup()'")
    asgroup = self.get_asgroup()
    new = asgroup['DesiredCapacity']
    if (new != current):
        msg = "Changed ASgroup {} desired_capacity from {} to {}".format(asg_name, current, new)
        util.message_integrations(msg)
def copy_template_file(fips_dir, proj_dir, filename, values, silent=False):
    """Copy a template from <fips_dir>/templates into the project dir,
    substituting template placeholders (e.g. the project name).  Unless
    *silent*, the user is asked before an existing file is overwritten.

    :param fips_dir:    absolute fips directory
    :param proj_dir:    absolute project directory
    :param filename:    file to copy from fips/templates
    :param values:      template key/value dictionary
    :param silent:      overwrite without asking and suppress status output
    :returns:           True when the file was written, False otherwise
    """
    src_path = '{}/templates/{}'.format(fips_dir, filename)
    dst_path = '{}/{}'.format(proj_dir, filename)
    if not os.path.isfile(src_path):
        log.error("template src file '{}' doesn't exist".format(src_path))
    if not silent and os.path.isfile(dst_path):
        if not util.confirm("overwrite '{}'?".format(dst_path)):
            log.info("skipping '{}'".format(dst_path))
            return False
    with open(src_path, 'r') as src:
        rendered = Template(src.read()).substitute(values)
    with open(dst_path, 'w') as dst:
        dst.write(rendered)
    if not silent:
        log.info("wrote '{}'".format(dst_path))
    return True
def command(self, opts, pattern): """Disable check bundles based on pattern Note: if you want to disable only some metrics for a check, use the disable_metrics command instead. Arguments: pattern -- search pattern for checks """ checks, groups = util.find_checks(self.api, pattern) if not checks: log.error("No matching checks found\n" % check_id) return print "Disabling the following check bundles: " bundle_ids = {} for c in checks: if c['bundle_id'] not in bundle_ids: print " %s" % c['name'] bundle_ids[c['bundle_id']] = c['name'] if util.confirm(): for c in bundle_ids: log.msg("Disabling %s" % bundle_ids[c]) self.api.disable_check_bundle(bundle_id=c)
def scale(self, desired):
    """Set this ASgroup's DesiredCapacity to *desired* (boto3 client API).

    Refuses to scale below MinSize.  Scaling above MaxSize asks the user
    for confirmation and raises MaxSize first.  Notifies integrations
    when the desired capacity actually changed.
    """
    debug("In asgroup.y scale")
    asgroup = self.get_asgroup()
    client = util.as_conn()
    asg_name = self.name()
    if desired < asgroup['MinSize']:
        print "Cannot scale: {} is lower than MinSize ({})".format(
            desired, asgroup['MinSize'])
        return
    if desired > asgroup['MaxSize']:
        print "Cannot scale: {} is greater than MaxSize ({})".format(
            desired, asgroup['MaxSize'])
        if not util.confirm("Increase MaxSize to {}?".format(desired)):
            return
        # user approved: raise MaxSize so the new desired capacity is legal
        asgroup['MaxSize'] = desired
        client.update_auto_scaling_group(AutoScalingGroupName=asg_name,
                                         MaxSize=desired)
    current = asgroup['DesiredCapacity']
    # Set DesiredCapacity
    response = client.set_desired_capacity(AutoScalingGroupName=asg_name,
                                           DesiredCapacity=desired)
    # Check if DesiredCapacity was changed
    debug("in asgroup.py scale: running 'asgroup = self.get_asgroup()'")
    asgroup = self.get_asgroup()
    new = asgroup['DesiredCapacity']
    if (new != current):
        msg = "Changed ASgroup {} desired_capacity from {} to {}".format(
            asg_name, current, new)
        util.message_integrations(msg)
def activate(self):
    """Create (or recreate) this role's LaunchConfiguration (boto3 client).

    If the LC already exists, the user is asked before it is deleted and
    recreated.

    :returns: the create_launch_configuration response on success,
              True if the user declined to overwrite.
    """
    debug("in launchconfig.py activate")
    conn = util.as_conn()
    name = self.name()
    if self.exists():
        pprint("in launchconfig.py self.exists()")
        # NOTE: I don't think program logic ever gets here
        if not util.confirm("LaunchConfig {} already exists, overwrite?".format(name)):
            # BUG FIX: this debug message previously claimed the overwrite
            # was confirmed, but this branch runs when the user declined
            pprint("in launchconfig.py activate: overwrite declined, keeping LaunchConfig")
            return True
        # delete existing
        pprint("in launchconfig.py activate: deleting LaunchConfig")
        conn.delete_launch_configuration(LaunchConfigurationName=name)
    # get configuration for this LC
    cfg = self.role_config
    # NOTE: wrap the following in a try block to catch errors
    lc = conn.create_launch_configuration(
        AssociatePublicIpAddress=True,  # required for instances to be reachable
        LaunchConfigurationName=name,
        IamInstanceProfile=cfg.get('iam_profile'),
        ImageId=cfg.get('ami'),
        InstanceType=cfg.get('instance_type'),
        KeyName=cfg.get('keypair_name'),
        UserData=self.cloud_init_script(),
        SecurityGroups=cfg.get('security_groups')
    )
    # (removed commented-out boto2-era error check that no longer applied)
    util.message_integrations("Activated LaunchConfig {}".format(name))
    return lc
def main():
    """Run filter_samples, asking before clobbering an existing output file."""
    if not os.path.exists(OUTPUT_FILE):
        filter_samples()
    elif util.confirm(
            "The destination file already exist. Do you want to overwrite it [y/n]?"
    ):
        filter_samples()
def reload(self):
    """Tear down this ASgroup and bring it back up, after user confirmation."""
    prompt = "Are you sure you want to tear down the {} ASgroup and recreate it?".format(self.name())
    if util.confirm(prompt):
        util.message_integrations("Reloading ASgroup {}".format(self.name()))
        self.deactivate()
        util.retry(lambda: self.activate(), 60)
def deregister_bot(conn, name):
    """Delete the bot named *name* (after confirmation), then re-rank bots."""
    rows = conn.execute('select * from bots where name = ?', (name,)).fetchall()
    removed = False
    if rows:
        bot_id = rows[0]['id']
        if util.confirm("Delete bot {}?".format(bot_id), json_confirm=True):
            conn.execute('delete from bots where id = ?', (bot_id,))
            output.output("Bot deregistered.")
            removed = True
    if not removed:
        output.output("No bot to deregister.")
    rerank_bots(conn)
def command(self, opts, duration, pattern, notes=""):
    """Schedule maintenance for rules matching the pattern

    Arguments:
        duration -- how long should the maintenance window last?
        pattern -- pattern to match the check name with
        notes -- optional notes for the maintenance window

    Duration should be of the form <integer>[m|h|d]. Examples:
        10m == 10 minutes
        4h == 4 hours
        2d == 2 days
    """
    if duration[-1] not in 'mhd':
        log.error("Duration needs to be of the form <integer>[m|h|d]")
        sys.exit(1)
    rules = self.api.list_rules()
    checks = self.api.list_checks(active='true')
    # checks whose names match the pattern, keyed by check_id
    filtered_checks = {}
    for c in checks:
        if re.search(pattern, c['name'], flags=re.IGNORECASE):
            filtered_checks[c['check_id']] = c
    filtered_rules = [r for r in rules if r['check_id'] in filtered_checks]
    # Remove duplicates
    dedup_rules = {}
    for r in filtered_rules:
        dedup_rules[(r['check_id'], r['metric_name'], r['severity'])] = r
    filtered_rules = dedup_rules.values()
    log.msg("Scheduling maintenance for:")
    for r in sorted(filtered_rules):
        print " Sev %s : %s : %s (from %s)" % (
            r['severity'], filtered_checks[r['check_id']]['name'],
            r['metric_name'], filtered_checks[r['check_id']]['agent'])
    if util.confirm():
        log.msg("Setting maintenance:")
        for r in filtered_rules:
            log.msgnb("Sev %s : %s : %s..." % (
                r['severity'], filtered_checks[r['check_id']]['name'],
                r['metric_name']))
            try:
                self.api.add_maintenance(
                    check_id=r['check_id'],
                    start='now',
                    stop=duration,
                    metric_name=r['metric_name'],
                    severity=r['severity'],
                    notes=notes)
                log.msgnf("Success")
            except circonusapi.CirconusAPIError, e:
                log.msgnf("Failed")
                log.error(e.error)
def reload(self):
    """Recreate this ASgroup; simply activate it if it doesn't exist yet."""
    # nothing to tear down -> just activate and stop
    if not self.get_asgroup() or not self.exists():
        self.activate()
        return
    debug("In asgroup.py reload")
    question = "Are you sure you want to tear down the {} ASgroup and recreate it?".format(self.name())
    if util.confirm(question):
        util.message_integrations("Reloading ASgroup {}".format(self.name()))
        self.deactivate()
        util.retry(lambda: self.activate(), 60)
def command(self, opts, pattern, *metrics_to_enable):
    """Set the active metrics for a check based on regular expression

    This command will set the enabled metrics to exactly what matches
    the pattern(s) given. Any other metrics will be disabled, regardless
    of what their original setting was.

    Arguments:
        pattern -- Pattern for checks
        metrics_to_enable -- One or more regexes for enabled metrics
    """
    checks, groups = util.find_checks(self.api, pattern)
    to_enable = {}
    # Pick only one check per check bundle
    bundles = {}
    for c in checks:
        if c['bundle_id'] in bundles:
            continue
        bundles[c['bundle_id']] = c
    log.msg("Retrieving metrics for checks")
    count = 0
    for c in bundles.values():
        count += 1
        # progress indicator (trailing comma suppresses the newline)
        print "\r%s/%s" % (count, len(bundles)),
        sys.stdout.flush()
        rv = self.api.list_available_metrics(check_id=c['check_id'])
        to_enable[c['check_id']] = []
        for mtype in rv['metrics']:
            for metric in rv['metrics'][mtype]:
                # NOTE(review): this loop variable shadows the outer
                # 'pattern' argument after the first iteration
                for pattern in metrics_to_enable:
                    if re.match(pattern, metric):
                        to_enable[c['check_id']].append(metric)
    log.msg("About to set enabled metrics for the following checks")
    for c in bundles.values():
        log.msg(" %s (%s)" % (c['name'],
            ', '.join(sorted(to_enable[c['check_id']]))))
    if util.confirm():
        for c in bundles.values():
            # Enable metrics here
            log.msgnb("%s..." % c['name'])
            # The set of metrics has changed, apply the edit
            self.api.edit_check_bundle(
                bundle_id=c['bundle_id'],
                metric_name=to_enable[c['check_id']])
            log.msgnf("Done")
def command(self, opts, pattern, *metrics_to_enable):
    """Activate metrics for checks

    Arguments:
        pattern -- Pattern for checks
        metrics_to_enable -- List of metrics to enable
    """
    checks, groups = util.find_checks(self.api, pattern)
    # check_id -> list of metric names already enabled on that check
    already_enabled = {}
    # Pick only one check per check bundle
    bundles = {}
    for c in checks:
        if c['bundle_id'] in bundles:
            continue
        bundles[c['bundle_id']] = c
    log.msg("Retrieving metrics for checks")
    count = 0
    for c in bundles.values():
        count += 1
        # progress indicator (trailing comma suppresses the newline)
        print "\r%s/%s" % (count, len(bundles)),
        sys.stdout.flush()
        rv = self.api.list_metrics(check_id=c['check_id'])
        already_enabled[c['check_id']] = []
        for metric in sorted(rv):
            if metric['enabled']:
                already_enabled[c['check_id']].append(metric['name'])
    log.msg("Metrics to enable: %s" % (', '.join(metrics_to_enable)))
    log.msg("About to enable metrics for the following checks")
    for c in bundles.values():
        log.msg(" %s (%s)" % (c['name'],
            ', '.join(already_enabled[c['check_id']])))
    if util.confirm():
        for c in bundles.values():
            # Enable metrics here
            log.msgnb("%s..." % c['name'])
            all_metrics = set(already_enabled[c['check_id']]) \
                | set(metrics_to_enable)
            if all_metrics != set(already_enabled[c['check_id']]):
                # The set of metrics has changed, apply the edit
                self.api.edit_check_bundle(
                    bundle_id=c['bundle_id'],
                    metric_name=list(all_metrics))
                log.msgnf("Done")
            else:
                log.msgnf("No changes")
def reload(self):
    """Recreate this ASgroup; just activate it if it doesn't exist yet."""
    # skip deactivation if it doesn't exist
    asgroup = self.get_asgroup()
    if not asgroup or not self.exists():
        self.activate()
        return
    debug("In asgroup.py reload")
    if not util.confirm(
            "Are you sure you want to tear down the {} ASgroup and recreate it?"
            .format(self.name())):
        return
    util.message_integrations("Reloading ASgroup {}".format(self.name()))
    self.deactivate()
    # retry activation for up to 60 (units per util.retry's contract --
    # presumably seconds or attempts; not visible here)
    util.retry(lambda: self.activate(), 60)
def do_drop(self, args_str):
    """Drop local data for the given stock/index codes (all data if none given)."""
    parser = self._get_arg_parser()
    parser.add_argument('codes', nargs='*')
    opts = self._parse_arg(parser, args_str)
    if not opts:
        return
    codes = opts.codes
    if codes:
        for code in codes:
            logging.info('drop stock/index %s' % code)
            self.dm.drop_local_data(code)
    elif util.confirm('drop all data?'):
        logging.info('all local data will be dropped')
        self.dm.drop_local_data(None)
def command(self, opts, pattern, replacement):
    """Rename multiple graphs at once

    Arguments:
        pattern -- a regex to select the graphs to rename
        replacement -- what to replace the graph name with

    The replacement can contain \1, \2 etc. to refer to groups in the
    pattern.
    """
    rv = self.api.list_graphs()
    filtered_graphs = []
    # Python 2 comparator sort: order graphs by title
    for g in sorted(rv, lambda a, b: cmp(a['title'], b['title'])):
        if re.search(pattern, g['title']):
            filtered_graphs.append(g)
    renames = {}
    log.msg("Going to perform the following renames:")
    for g in filtered_graphs:
        renames[g['title']] = re.sub(pattern, replacement, g['title'])
        log.msg(" %s => %s" % (g['title'], renames[g['title']]))
    if util.confirm():
        for g in filtered_graphs:
            log.msgnb("Renaming %s..." % g['title'])
            # fetch the current graph data so only the title changes
            try:
                rv = self.api.get_graph(
                    graph_id=g['graph_id'])
            except circonusapi.CirconusAPIError, e:
                log.msgnf("Failed to fetch current graph")
                log.error(e.error)
                continue
            try:
                graph_data = json.loads(rv['graph_data'])
            except KeyError:
                log.msgnf("Failed to fetch current graph")
                log.error("No graph data returned")
                continue
            except ValueError:
                log.msgnf("Failed to fetch current graph")
                log.error("Unable to parse the graph data")
                continue
            graph_data['title'] = renames[g['title']]
            try:
                rv = self.api.edit_graph(graph_id=g['graph_id'],
                    graph_data=json.dumps(graph_data))
                log.msgnf("Success")
            except circonusapi.CirconusAPIError, e:
                log.msgnf("Failed to edit graph")
                log.error(e.error)
def command(self, opts, pattern, replacement):
    """Rename multiple checks at once

    Options:
        -a - Include inactive and deleted checks also

    Arguments:
        pattern -- a regex to select the checks to rename
        replacement -- what to replace the check name with

    The replacement can contain \1, \2 etc. to refer to groups in the
    pattern.
    """
    active = 'true'
    if ('-a', '') in opts:
        active = ''
    rv = self.api.list_checks(active=active)
    filtered_checks = []
    # We rename bundles, not checks, so only do each bundle once
    bundles = {}
    for check in sorted(rv):
        if re.search(pattern, check['name']):
            if not check['bundle_id'] in bundles:
                filtered_checks.append(check)
                bundles[check['bundle_id']] = True
    renames = {}
    log.msg("Going to perform the following renames:")
    for c in filtered_checks:
        renames[c['name']] = re.sub(pattern, replacement, c['name'])
        log.msg("%s => %s" % (c['name'], renames[c['name']]))
    if util.confirm():
        for c in filtered_checks:
            log.msgnb("Renaming %s... " % c['name'])
            # edit_check_bundle requires the metric list to be re-sent
            metrics = [m['name'] for m in
                self.api.list_metrics(check_id=c['check_id'])]
            params = {
                'bundle_id': c['bundle_id'],
                'metric_name': metrics,
                'display_name_%s' % c['target']: renames[c['name']]}
            try:
                rv = self.api.edit_check_bundle(**params)
                log.msgnf("Success")
            except circonusapi.CirconusAPIError, e:
                log.msgnf("Failed")
                log.error(e.error)
def command(self, opts, check_pattern, metric_pattern): """List active metrics, optionally disabling them Note: if you want to disable all metrics for a check, use the disable_checks command instead. Options: -d - disable the metrics Arguments: check_pattern -- search pattern for checks metric_pattern -- search pattern for metrics """ checks, groups = util.find_checks(self.api, check_pattern) if not checks: log.error("No matching checks found\n" % check_id) return to_remove = {} to_keep = {} check_names = {} bundle_names = {} for c in checks: check_names[c['check_id']] = c['name'] bundle_names[c['bundle_id']] = c['name'] matching_metrics, non_matching_metrics = util.find_metrics( self.api, c['check_id'], metric_pattern) if matching_metrics: to_remove[c['check_id']] = matching_metrics to_keep[c['bundle_id']] = non_matching_metrics print non_matching_metrics print "Disabling the following metrics: " for c in sorted(to_remove): print " %s" % check_names[c] for m in to_remove[c]: print " %s" % m['name'] if ('-d', '') in opts: if util.confirm(): for c in to_keep: log.msg("Disabling metrics for check: %s" % bundle_names[c]) self.api.edit_check_bundle(bundle_id=c, metric_name=[i['name'] for i in to_keep[c]])
def command(self, opts, check_pattern, metric_pattern=None):
    """Removes rules for checks that match the given pattern

    Arguments:
        check_pattern -- regex to match check names on (optional)
        metric_pattern -- regex to match metric names on (optional)

    At least one of check_pattern or metric_pattern must be provided.
    If you want to leave out the check_pattern, then specify it as an
    empty string.
    """
    rules = self.api.list_rules()
    if check_pattern and check_pattern != '.':
        checks, groups = util.find_checks(self.api, check_pattern)
        check_ids = dict([(c['check_id'], c['name']) for c in checks])
        matching = [r for r in rules if r['check_id'] in check_ids]
    else:
        # empty/'.' pattern: consider every check
        checks = self.api.list_checks()
        check_ids = dict([(c['check_id'], c['name']) for c in checks])
        matching = rules
    if metric_pattern:
        matching = [r for r in matching
            if re.search(metric_pattern, r['metric_name'])]
    # delete in reverse order so rule 'order' numbers stay valid
    matching = sorted(matching, reverse=True,
        key=lambda x: (x['check_id'], x['metric_name'], x['order']))
    log.msg("About to delete the following rules:")
    for r in matching:
        log.msg("%s`%s (%s - %s %s)" % (check_ids[r['check_id']],
            r['metric_name'], r['order'], r['criteria'], r['value']))
    if util.confirm():
        for r in matching:
            log.msgnb("Deleting %s`%s (%s)..." % (
                check_ids[r['check_id']], r['metric_name'], r['order']))
            try:
                rv = self.api.remove_metric_rule(check_id=r['check_id'],
                    metric_name=r['metric_name'], order=r['order'])
                log.msgnf("Success")
            except circonusapi.CirconusAPIError, e:
                log.msgnf("Failed")
                log.error(e.error)
def scale(self, desired):
    """Set this ASgroup's desired capacity (boto2 object API).

    Refuses to scale below min_size.  Scaling above max_size asks the
    user and raises max_size first.  Notifies integrations when the
    capacity actually changed.
    """
    asgroup = self.get_asgroup()
    if desired < asgroup.min_size:
        print "Cannot scale: {} is lower than min_size ({})".format(desired, asgroup.min_size)
        return
    if desired > asgroup.max_size:
        print "Cannot scale: {} is greater than max_size ({})".format(desired, asgroup.max_size)
        if not util.confirm("Increase max_size to {}?".format(desired)):
            return
        # user approved: raise the ceiling so the new capacity is legal
        asgroup.max_size = desired
    current = asgroup.desired_capacity
    asgroup.desired_capacity = desired
    asgroup.update()
    # re-fetch to see whether the change took effect
    asgroup = self.get_asgroup()
    new = asgroup.desired_capacity
    if (new != current):
        msg = "Changed ASgroup {} desired_capacity from {} to {}".format(self.name(), current, new)
        util.message_integrations(msg)
def command(self, opts, template_name, pattern):
    """Add graphs for multiple checks in bulk based on a template

    Arguments:
        template_name -- the name of the template file
        pattern -- a regex to match on check names

    The templates are in json, and is in the same format as the output
    of the dump_graph command. Various string subsitutions can be used:

        {check_id} - The check ID
        {check_name} - The check name
        {check_target} - The target of the check (IP address)
        {check_agent} - The agent the check is run from
        {groupN} - Matching groups (the parts in parentheses) in the
            pattern given on the command line. (replace N with a group
            number)

    You can also use named matching groups - (?P<groupname>...) in the
    pattern and {groupname} in the graph template.
    """
    try:
        template = util.GraphTemplate(template_name)
    except IOError:
        log.error("Unable to open template %s" % template_name)
        sys.exit(1)
    checks, groups = util.find_checks(self.api, pattern)
    # ensure every metric the template references exists on the checks
    util.verify_metrics(self.api, template, checks)
    log.msg("About to add %s graphs for the following checks:" % (
        template_name))
    for c in checks:
        log.msg(" %s (%s)" % (c['name'], c['agent']))
    if not util.confirm():
        log.msg("Not adding graphs.")
        sys.exit()
    self.add_graphs(template, checks, groups)
def command(self, opts, pattern):
    """Delete multiple graphs at once

    Arguments:
        pattern -- a regex to select the graphs to delete
    """
    rv = self.api.list_graphs()
    filtered_graphs = []
    # Python 2 comparator sort: order graphs by title
    for g in sorted(rv, lambda a, b: cmp(a['title'], b['title'])):
        if re.search(pattern, g['title']):
            filtered_graphs.append(g)
    log.msg("Going to DELETE the following graphs:")
    for g in filtered_graphs:
        log.msg(" %s" % g['title'])
    if util.confirm():
        for g in filtered_graphs:
            log.msgnb("Deleting %s..." % g['title'])
            try:
                rv = self.api.remove_graph(graph_id=g['graph_id'])
                log.msgnf("Success")
            except circonusapi.CirconusAPIError, e:
                log.msgnf("Failed")
                log.error(e.error)
def scale(self, desired):
    """Set this ASgroup's desired capacity (boto2 object API).

    Refuses to scale below min_size.  Scaling above max_size asks the
    user and raises max_size first.  Notifies integrations when the
    capacity actually changed.
    """
    asgroup = self.get_asgroup()
    if desired < asgroup.min_size:
        print "Cannot scale: {} is lower than min_size ({})".format(
            desired, asgroup.min_size)
        return
    if desired > asgroup.max_size:
        print "Cannot scale: {} is greater than max_size ({})".format(
            desired, asgroup.max_size)
        if not util.confirm("Increase max_size to {}?".format(desired)):
            return
        # user approved: raise the ceiling so the new capacity is legal
        asgroup.max_size = desired
    current = asgroup.desired_capacity
    asgroup.desired_capacity = desired
    asgroup.update()
    # re-fetch to see whether the change took effect
    asgroup = self.get_asgroup()
    new = asgroup.desired_capacity
    if (new != current):
        msg = "Changed ASgroup {} desired_capacity from {} to {}".format(
            self.name(), current, new)
        util.message_integrations(msg)
def command(self, opts, target, agent, community, friendly_name, pattern=None): """Adds snmp checks for a switch This command queries the switch using snmpwalk to discover what ports to add checks for. This requires that the snmpwalk command be available and that the switch be accessible over snmp from the machine that this command is run from. Arguments: target -- The address of the switch agent -- The name of the agent you wish circonus to use for the checks community -- SNMP community for the switch friendly_name -- what to call the switch in the check name. This is usually the (short) hostname of the switch. pattern -- An optional regex to limit which ports to add. """ # TODO - abstract this away and prompt the user for a list of # available agents rv = self.api.list_agents() agents = dict([(i['name'], i['agent_id']) for i in rv]) try: self.agent = agents[agent] except KeyError: log.error("Invalid/Unknown Agent: %s" % agent) sys.exit(1) self.target = util.resolve_target(target) self.community = community self.friendly_name = friendly_name self.ports = self.get_ports(target, community, pattern) log.msg("About to add checks for the following ports:") for port in sorted(self.ports): log.msg(port) if util.confirm(): self.add_checks()
def download(bot_path):
    """
    Downloads the bot to the file bot_path. May only be called once
    Config is properly initialized.
    :param bot_path: The path the bot should be written
    :return: Nothing
    """
    config = client.Config()
    target = pathlib.Path(bot_path)
    # Confirm overwriting
    if target.exists() and not util.confirm("{} already exists. Overwrite?".format(target)):
        output.output("Aborting download.")
        return
    # Make the directories
    target.parent.mkdir(parents=True, exist_ok=True)
    output.output("Downloading bot...")
    result = _download_bot(config.user_id, config.api_key, target)
    if result.status_code != client.SUCCESS:
        raise IOError("Unable to download bot: {}".format(result.text))
    output.output("Successfully downloaded bot with version {}".format(
        _get_bot_version(config.user_id)))
def command(self, opts, title, pattern):
    """Add a worksheet containing matching graphs

    Arguments:
        pattern -- a regex to match on graph names

    Options:
        -f/--favorite -- mark the worksheet as a favorite
    """
    rv = self.api.list_graphs()
    filtered_graphs = []
    # Python 2 comparator sort: order graphs by title
    for g in sorted(rv, lambda a, b: cmp(a['title'], b['title'])):
        if re.search(pattern, g['title']):
            filtered_graphs.append(g)
    favorite = False
    if ('-f', '') in opts or ('--favorite', '') in opts:
        favorite = True
    worksheet_data = {
        'title': title,
        'favorite': favorite,
        'graphs': filtered_graphs}
    log.msg("Adding a worksheet with the following graphs:")
    for i in filtered_graphs:
        log.msg(" %s" % i['title'])
    if favorite:
        log.msg("Worksheet will be marked as a favorite")
    if not util.confirm():
        log.msg("Not adding worksheet")
        sys.exit(1)
    log.msgnb("Adding worksheet... ")
    try:
        self.api.add_worksheet(worksheet_data=json.dumps(worksheet_data))
        log.msgnf("Success")
    except circonusapi.CirconusAPIError, e:
        log.msgnf("Failed")
        log.error("Unable to add worksheet: %s" % e)
def command(self, opts, graph_id, new_title, *params): """Copy a graph, changing some parameters Options: -v -- Show the new graph data before adding the graph Arguments: graph_id -- The UUID of the graph you want to copy new_title -- The title of the new graph params -- Search/replace on datapoint values The search/replace parameters will replace any datapoint values, including the check_id, metric_name, colors, and datapoint names. Parameters should be of the form: search_term=replacement For example, to modify the check id, you can do 1234=2345 You can also specify a single number without an equals sign. In this case, all check ids that aren't replaced with another pattern will be set to this value. If you have a graph that is only for a single check, then specifying the check id mapping as a single number is what you want. """ try: uuid.UUID(graph_id) except ValueError: log.error("Invalid graph ID specified. It should look like a UUID") sys.exit(1) rv = self.api.get_graph(graph_id=graph_id) graph_data = json.loads(rv["graph_data"]) # Set the new title graph_data["title"] = new_title subs = {} default_check_id = None for p in params: try: # First try to parse as a single check id default_check_id = int(p) continue except ValueError: pass parts = [i for i in p.split("=", 1)] try: subs[parts[0]] = parts[1] except IndexError: log.error("Invalid substitution: %s" % p) sys.exit(1) for d in graph_data["datapoints"]: for k in d: if type(d[k]) == str or type(d[k]) == unicode: # String search/replace for s in subs: d[k] = d[k].replace(s, subs[s]) print d[k], s, subs[s] elif type(d[k]) == int: # Integer replacement (matches only on the whole number) # Used for check_ids if str(d[k]) in subs: d[k] = int(subs[str(d[k])]) elif k == "check_id" and default_check_id: # If we didn't do a substitution previously, and we're # considering a check_id, replace the check_id with # the default d[k] = default_check_id if ("-v", "") in opts: print json.dumps(graph_data, indent=4) if not 
util.confirm(): log.msg("Not adding graph.") sys.exit(0) log.msgnb("Adding copied graph: %s..." % graph_data["title"]) try: rv = self.api.add_graph(graph_data=json.dumps(graph_data)) log.msgnf("Success") except circonusapi.CirconusAPIError, e: log.msgnf("Failed") log.error(e.error)
to_be_updated, to_be_removed, _ = util.compare_filelists( lyrics_on_local_rel, lyrics_on_walkman, root_src=lyrics_dir, root_dst=walkman_dir) util.status('Syncing lyrics files from local to Walkman') util.sync_filelists(to_be_updated, to_be_removed, src_dir=lyrics_dir, dst_dir=walkman_dir, remove_unmatched=remove_unmatched) if __name__ == '__main__': if util.confirm('Sync songs in playlist to Walkman?'): sync_playlist(playlists=setting.PLAYLISTS, walkman_dir=setting.WALKMAN_DIR) create_m3u_playlist(playlists=setting.PLAYLISTS, walkman_dir=setting.WALKMAN_DIR, walkman_prefix=setting.WALKMAN_PLAYLIST_PREFIX) if util.confirm('Create local lyrics directory?'): create_local_lyrics(playlists=setting.PLAYLISTS, lyrics_dir=setting.LYRICS_DIR, lyrics_source_dir=setting.LYRICS_SOURCE_DIR) if util.confirm('Sync lyrics to Walkman?'): sync_lyrics(playlists=setting.PLAYLISTS, lyrics_dir=setting.LYRICS_DIR,
("Calculate Voronoi diagram", "voronoi.py"), ("Patch trajectories", "patch.py"), ("Export csv", "export.py") ] def message(text): print(text) if hasattr(config, "NOTIFY_CMD"): try: p = subprocess.Popen(config.NOTIFY_CMD, stdin=subprocess.PIPE, shell=True) p.communicate(text + "\n") except Exception, e: print("Notify command could not be executed: " + e.message) start_step = 0 if util.confirm("Did you already run parts of the procedure before?", allow_empty = True, default = False): print("\n".join(["[" + str(i+1) + "] " + action for i, (action, script) in enumerate(steps)])) start_step = int(raw_input("Which step do you want to start from (all previous steps have to be run successfully before and config.py should not have been changed in the meantime)? [1-" + str(len(steps)) + "]: ")) - 1 else: for action in preparations: if not util.confirm("Did you " + action + "?", allow_empty = True, default = True): print("Please " + action + " before you continue.") sys.exit() for i, (action, script) in enumerate(steps[start_step:]): print("\033[93m[" + str(i+1) + "/" + str(len(steps)-start_step) + "]\033[0m " + action) start = time.time() if os.system("python " + script) == 0: end = time.time() message(action + " finished successfully after " + str((end-start)/60.0) + " minutes.") else:
response = util.get_url("dna/intent/api/v1/network-device") #print(json.dumps(response, indent=2)) print("Parsing device list...") devicelist = {'list': []} for dev in response['response']: if dev['series'] in SCOPE: if dev['managementIpAddress'].lower() != dev['hostname'].lower(): print("Found new device, collecting information...") devicelist['list'].append(dev) data = "\n".join([ "{}:\t{}".format(dev['managementIpAddress'], dev['series']) for dev in devicelist['list'] ]) util.sep( "Parsing complete, here is a list of devices that needs updated, please confirm..." ) print(data) if util.confirm() == "y": print("Confirmed, updating devices...") for dev in devicelist['list']: util.update_device(dev['managementIpAddress'], dev['hostname'].lower()) util.sep("Task completed, exiting...") else: util.sep("Task canceled, exiting...")
def reload(self):
    """Tear down this ASgroup and recreate it, after user confirmation."""
    if not util.confirm("Are you sure you want to tear down the {} ASgroup and recreate it?".format(self.name())):
        return
    util.message_integrations("Reloading ASgroup {}".format(self.name()))
    self.deactivate()
    # retry activation for up to 60 (units per util.retry's contract --
    # presumably seconds or attempts; not visible here)
    util.retry(lambda: self.activate(), 60)
def command(self, opts, template_name, pattern, *params): """Adds rules for checks that match the given pattern Arguments: template_name -- the name of the template file pattern -- regex to match check names on params -- other parameters (see below) Other parameters are specified as "param_name=value" and will be substituted in the template. Use {param_name} in the template. Some predefined parameters: {check_id} - The check ID {check_name} - The check name {check_target} - The target of the check (IP address) {check_agent} - The agent the check is run from {groupN} - Matching groups (the parts in parentheses) in the pattern given on the command line. (replace N with a group number) """ template = util.RuleTemplate(template_name) template_params = template.parse_nv_params(params) checks, groups = util.find_checks(self.api, pattern) util.verify_metrics(self.api, template, checks) log.msg("About to add %s rules for the following checks:" % (template_name)) for c in checks: log.msg(" %s (%s)" % (c["name"], c["agent"])) if not util.confirm(): log.msg("Not adding rules.") sys.exit() for c in checks: p = { "check_name": c["name"], "check_id": c["check_id"], "check_target": c["target"], "check_agent": c["agent"], } p.update(template_params) p.update(groups[c["check_id"]]) substituted = template.sub(p) # Get mapping from contact group name to ID rv = self.api.list_contact_groups() contact_group_ids = {} for i in rv: contact_group_ids[i["name"]] = i["contact_group_id"] for rule in substituted: # Extract the contact groups and get the IDs for severity in rule["contact_groups"]: contact_group_names = rule["contact_groups"][severity] del rule["contact_groups"][severity] rule["contact_groups"][severity] = [] for cg in contact_group_names: rule["contact_groups"][severity].append({"id": contact_group_ids[cg], "name": cg}) log.msgnb("Adding rule for %s... 
" % c["name"]) try: rv = self.api.set_ruleset(ruleset=json.dumps(rule)) log.msgnf("Success") except circonusapi.CirconusAPIError, e: log.msgnf("Failed") log.error(e.error)