import os
from argparse import ArgumentParser

import argparse_config

# `getDataDir` is a CouchPotato helper defined elsewhere in the project.


def getOptions(args, base_path):

    # Options
    conf = os.path.join(base_path, "config.ini")

    parser = ArgumentParser(prog='CouchPotato.py')
    parser.add_argument('--data_dir', dest='data_dir',
                        help='Absolute or ~/ path of the data dir')
    parser.add_argument('--config_file', dest='config_file',
                        help='Absolute or ~/ path of the settings file (default DATA_DIR/settings.conf)')
    parser.add_argument('--debug', action='store_true', dest='debug',
                        help='Debug mode')
    parser.add_argument('--console_log', action='store_true', dest='console_log',
                        help='Log to console')
    parser.add_argument('--quiet', action='store_true', dest='quiet',
                        help='No console logging')
    parser.add_argument('--daemon', action='store_true', dest='daemon',
                        help='Daemonize the app')
    parser.add_argument('--pid_file', dest='pid_file',
                        help='Path to pidfile needed for daemon')

    # parse the application's runtime configuration file and set options
    if os.path.exists(conf):
        argparse_config.read_config_file(parser, conf)

    options = parser.parse_args(args)

    data_dir = os.path.expanduser(options.data_dir if options.data_dir else getDataDir())

    if not options.config_file:
        options.config_file = os.path.join(data_dir, 'settings.conf')
    if not options.pid_file:
        options.pid_file = os.path.join(data_dir, 'couchpotato.pid')

    options.config_file = os.path.expanduser(options.config_file)
    options.pid_file = os.path.expanduser(options.pid_file)

    return options

import argparse
import getpass
import os

import argparse_config

# `parse_unicode` is a project-level helper for decoding argv.


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--auth-service', type=str.lower,
                        help='Auth Service [ptc|google]', default='ptc')
    parser.add_argument('-u', '--username', help='Username', required=True)
    parser.add_argument('-p', '--password', help='Password', required=False)
    parser.add_argument('-l', '--location', type=parse_unicode,
                        help='Location, address or coordinates', required=True)
    parser.add_argument('-r', '--radius', help='Search radius [m]',
                        required=True, type=int)
    parser.add_argument('-H', '--host', help='Set web server listening host',
                        default='127.0.0.1')
    parser.add_argument('-P', '--port', type=int,
                        help='Set web server listening port', default=5000)
    parser.add_argument('-d', '--debug', type=str.lower,
                        help='Debug Level [info|debug]', default=None)
    parser.add_argument('-c', '--pycurl', help='Use pycurl downloader (unstable)',
                        action='store_true')

    # Pull defaults from the user's config file, if present, before parsing.
    config_path = os.path.expanduser("~/.pogomrc")
    if os.path.isfile(config_path):
        argparse_config.read_config_file(parser, config_path)

    args = parser.parse_args()

    # Prompt for the password rather than requiring it on the command line.
    if args.password is None:
        args.password = getpass.getpass()

    return args

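# A hedged sketch of the config-file round trip used above: generate_config()
# serializes the parsed values and read_config_file() installs the file's
# values as parser defaults for a later parse. The 'example.conf' path and
# the --host/--port options are illustrative, not from the source.
import argparse

import argparse_config

parser = argparse.ArgumentParser()
parser.add_argument('--host', default='127.0.0.1')
parser.add_argument('--port', type=int, default=5000)

args = parser.parse_args(['--port', '8080'])
with open('example.conf', 'w') as f:
    f.write(argparse_config.generate_config(parser, args, section='default'))

# Values from the file now act as defaults, so an empty command line
# should come back with port=8080.
argparse_config.read_config_file(parser, 'example.conf')
print(parser.parse_args([]))
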
    help='N printed to GIZA NBEST file.')
parser.add_argument('-j', '--json_out',
                    help='File to dump json graphs to.')
parser.add_argument('--num_restarts', type=int, default=5,
                    help='Number of random restarts to execute during hill-climbing algorithm.')
parser.add_argument('--align_out',
                    help='Human-readable alignments output file - WARNING, will force conversion of const nodes to var nodes for alignment')
parser.add_argument('--align_in',
                    help='Alignments from human-editable text file, as from align_out')
parser.add_argument('--layout', default='dot',
                    help='Graphviz output layout')
# TODO make interactive option and option to process a specific range

# First pass picks up --conf_file; if one is given, its values become the
# defaults for a second, final parse.
args_conf = parser.parse_args()
if args_conf.conf_file:
    argparse_config.read_config_file(parser, args_conf.conf_file)
args = parser.parse_args()

if args.no_verbose:
    args.verbose = False
if not args.num_align_read:
    args.num_align_read = args.num_aligned_in_file
if not os.path.exists(args.outdir):
    os.makedirs(args.outdir)

if args.bitext:
    xlang_main(args)
else:
    if args.infile is None or args.outdir is None:
        # parser.error() raises SystemExit itself; no `raise` needed.
        parser.error("Both --infile and --outdir are required flags.")

import argparse
import json
import logging
import os
import re
import sys
from os.path import expanduser

import argparse_config
import yaml

# `SmartFormatter` is a module-level helper from the surrounding project.


def run():
    loglevel = "WARNING"
    home = expanduser("~")
    prog = os.path.basename(__file__)
    cfn = home + '/' + '.' + os.path.splitext(prog)[0] + '.conf'

    p = argparse.ArgumentParser(description="yaml json k8s laundry",
                                formatter_class=SmartFormatter)

    # overall app related stuff
    p.add_argument('-p', '--pretty', action='store_true', dest='pretty',
                   default=False)
    p.add_argument('-t', '--type', action='store', dest='output_type',
                   default='yaml', choices=['json', 'yaml'],
                   help='Output type, json or yaml')

    # these are the command line leftovers, the files to process
    p.add_argument('files', nargs='*')

    # non application related stuff
    p.add_argument('-l', '--loglevel', action='store', dest='loglevel',
                   default=loglevel,
                   choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   help='Log level (DEBUG,INFO,WARNING,ERROR,CRITICAL) default is: ' + loglevel)
    p.add_argument('-s', '--save', action='store_true', dest='save',
                   default=False,
                   help='save select command line arguments (default is always) in "' + cfn + '" file')

    # read in defaults from ~/.PROGBASENAMENOSUFFIX if the file exists
    if os.path.isfile(cfn):
        argparse_config.read_config_file(p, cfn)

    # parse arguments (after reading defaults from the ~/. dot file)
    args = p.parse_args()
    if args.loglevel:
        loglevel = args.loglevel

    # set our logging level (from -l INFO (or whatever))
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    logging.basicConfig(level=numeric_level)
    logging.info('Program starting :%s', prog)
    logging.debug('Arg: pretty   :%s', args.pretty)
    logging.debug('Arg: type     :%s', args.output_type)
    logging.debug('Arg: loglevel :%s', loglevel)
    logging.debug('Arg: save     :%s', args.save)

    # save to the defaults file if a -s specified on command line
    if args.save:
        # strip the one-shot 'save' and 'addminion' keys so they are not
        # re-applied the next time the defaults file is read back in
        apc = argparse_config.generate_config(p, args, section='default')
        apc = re.sub('\nsave\n', '\n', apc)
        apc = re.sub('\naddminion\n', '\n', apc)
        with open(cfn, 'w') as f:
            f.write(apc)

    # here we go, start of program here
    s_out = {"apiVersion": "v1", "kind": "List", "items": []}
    av = args.files
    if len(av) > 0:
        for name in av:
            i_file = sys.stdin if name == '-' else open(name)
            s_out['items'].append(yaml.safe_load(i_file.read()))
    else:
        logging.debug("no arguments specified, stdin assumed")
        s_out['items'].append(yaml.safe_load(sys.stdin))

    # if we are only messing with a single file, unwrap the List
    s = s_out
    if len(av) < 2:
        s = s_out['items'][0]

    if args.output_type == 'yaml':
        print yaml.dump(s)
    elif args.output_type == 'json':
        if args.pretty:
            print json.dumps(s, sort_keys=True, indent=4, separators=(',', ': '))
        else:
            print json.dumps(s)
    else:
        raise ValueError('Invalid output type : %s' % args.output_type)

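# The save path above has to strip the one-shot 'save' (and 'addminion')
# keys before writing, or the flag would re-trigger on every later run. A
# minimal hedged sketch of that strip step in isolation; strip_keys and the
# key names are illustrative, not part of argparse_config:
import re


def strip_keys(config_text, keys=('save', 'addminion')):
    # Drop any line consisting of the bare key (how store_true flags tend
    # to appear in the generated config) or a 'key = value' assignment.
    for key in keys:
        config_text = re.sub(r'(?m)^%s\s*(=.*)?\n' % re.escape(key), '',
                             config_text)
    return config_text
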
    '--learning_rate', type=float,
    help='Model Parameter lr: Learning rate')
parser.add_argument('-k', type=float,
                    help='Model Parameter k; Strength of weight constraint')
parser.add_argument('-p', type=float,
                    help='Model Parameter p; L_p+1_Norm; Shape of weight constraint')

# Parse the command line first so explicit values can override the
# defaults loaded from the config file below.
override_args = parser.parse_args()

# Load default values, if any.
if os.path.exists('./scae_default.cfg'):
    import argparse_config
    argparse_config.read_config_file(parser, './scae_default.cfg')
    args = parser.parse_args([])
    # Override parsed defaults with args from the command line.
    for arg in override_args.__dict__:
        if override_args.__dict__[arg] is not None:
            args.__dict__[arg] = override_args.__dict__[arg]
else:
    args = override_args

# Also check args: if any param is None, complain and exit.
try:
    for arg in args.__dict__:
        assert args.__dict__[arg] is not None, 'Arg: ' + str(arg) + ' is None!'
except Exception as e:
    print e
    exit()

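# The fragment above implements "config file supplies defaults, command
# line wins": parse argv once for explicit values, parse an empty argv to
# materialize the config-file defaults, then copy every non-None explicit
# value over them. A hedged standalone sketch of that merge (merge_args is
# an illustrative name, not from the source):
def merge_args(defaults, overrides):
    # Any option left unset on the command line parses as None, so a
    # non-None value means the user typed it explicitly.
    for name, value in vars(overrides).items():
        if value is not None:
            setattr(defaults, name, value)
    return defaults
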
import argparse
import logging
import os
import sys

import argparse_config
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

# `dA` (the denoising autoencoder class) and `train` are project modules.


def main():
    parser = argparse.ArgumentParser(description='Run translation experiments.')
    parser.add_argument('--hidden_size', type=int, help='size of the hidden layer')
    parser.add_argument('--input', type=str, help='input data')
    parser.add_argument('--training_size', type=int,
                        help='amount of training data to use.')
    # parser.add_argument('--iterations', type=int,
    #                     help='iterations of subgrad')
    parser.add_argument('config', type=str)
    parser.add_argument('label', type=str)

    print >>sys.stderr, open(sys.argv[1]).read()
    argparse_config.read_config_file(parser, sys.argv[1])

    args = parser.parse_args()
    print args

    output_dir = os.path.join("Data", args.label)
    data_out = os.path.join(output_dir, "mydata.txt")
    print >>sys.stderr, data_out

    # Set up logging.
    logger = logging.getLogger("nn")
    logger.setLevel(logging.INFO)
    handler = logging.StreamHandler(open(data_out, 'w'))
    logger.addHandler(handler)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)

    # Read in training data.
    sparse_lines = []
    n_columns = 0
    for l in open(args.input):
        if not l.strip():
            continue
        sparse_line = map(int, l.split())
        sparse_lines.append(sparse_line)
        n_columns = max(n_columns, max(sparse_line))
        if len(sparse_lines) > args.training_size:
            break
    n_columns = n_columns + 1

    # Densify: each line of indices becomes a one-hot row.
    data = numpy.zeros((len(sparse_lines), n_columns))
    for i, line in enumerate(sparse_lines):
        data[i, line] = 1

    # Pair each line with its successor in a doubled feature space.
    data_pairs = numpy.zeros((len(sparse_lines), 2 * n_columns))
    for i in range(len(sparse_lines[:-1])):
        line = sparse_lines[i]
        next_line = numpy.array(sparse_lines[i + 1])
        data_pairs[i, line] = 1
        data_pairs[i, next_line + n_columns] = 1

    # Create a hidden layer.
    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))
    x = T.matrix('x')
    da1 = dA(numpy_rng=rng, theano_rng=theano_rng, input=x,
             n_visible=n_columns, n_hidden=args.hidden_size)
    da2 = dA(numpy_rng=rng, theano_rng=theano_rng, input=x,
             n_visible=2 * args.hidden_size, n_hidden=10)

    train_data = theano.shared(data, borrow=True)
    train.pre_train(da1, train_data, logger=logger)
    logger.info("DONE Pretraining of first layer")

    logger.info("START Pretraining of layer 2")
    layer1 = theano.function([x], da1.get_hidden_values(x))
    layer1_1_output = layer1(data_pairs[:, :n_columns])
    layer1_2_output = layer1(data_pairs[:, n_columns:])
    output = theano.shared(numpy.hstack((layer1_1_output, layer1_2_output)),
                           borrow=True)
    train.pre_train(da2, output, logger=logger)

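# The densification loop above relies on numpy fancy indexing: a whole list
# of column indices can be set in one assignment. A tiny illustration with
# made-up data:
import numpy

sparse_lines = [[0, 2], [1]]
n_columns = 3
data = numpy.zeros((len(sparse_lines), n_columns))
for i, line in enumerate(sparse_lines):
    data[i, line] = 1   # sets every listed column of row i at once
# data is now [[1., 0., 1.],
#              [0., 1., 0.]]
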
import argparse
import logging
import os
import sys

import argparse_config
import numpy as np
import pydecode

# `train`, `tree`, `read_rule_set`, `make_bounds`, `memory`,
# `LexicalizedCFGEncoder`, `StructuredPerceptron` and `logger` are
# module-level names from the surrounding project.


def main():
    parser = argparse.ArgumentParser(description='Run parsing experiments.')
    parser.add_argument('--original_rules', type=str, help='Original rule file')
    parser.add_argument('--binarized_rules', type=str, help='Binarized rule file')
    parser.add_argument('--training_ps', type=str,
                        help='Lexicalized phrase structure file.')
    parser.add_argument('--training_dep', type=str, help='Dependency parse file.')
    parser.add_argument('--store_hypergraph_dir', type=str,
                        help='Directory to store/load hypergraphs.')
    parser.add_argument('--save_hypergraph', type=bool,
                        help='Construct and save hypergraphs.')
    parser.add_argument('--limit', type=int, help='Number of sentences to use.')
    parser.add_argument('--test_file', type=str, help='Test file.')
    parser.add_argument('--gold_file', type=str, help='Gold file.')
    parser.add_argument('--model', type=str, help='Weight model.')
    parser.add_argument('--test_limit', type=int,
                        help='Number of sentences to test on.')
    parser.add_argument('--run_eval', default=False, type=bool, help='')
    parser.add_argument('--test_load', default=False, type=bool, help='')
    parser.add_argument('--debugger', default=False, type=bool, help='')
    parser.add_argument('--oracle', default=False, type=bool,
                        help='Run oracle experiments')
    parser.add_argument('config', type=str)
    parser.add_argument('label', type=str)

    print >>sys.stderr, open(sys.argv[1]).read()
    argparse_config.read_config_file(parser, sys.argv[1])

    args = parser.parse_args()
    print args

    if args.debugger:
        from IPython.core import ultratb
        sys.excepthook = ultratb.FormattedTB(color_scheme='Linux', call_pdb=1)

    output_dir = os.path.join("Data", args.label)
    data_out = os.path.join(output_dir, "mydata.txt")
    print >>sys.stderr, data_out

    # Set up logging.
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(open(data_out, 'w'))
    logger.addHandler(handler)

    # Load data.
    print args.training_dep
    print args.training_ps
    if args.training_dep:
        X, Y = train.read_data_set(args.training_dep, args.training_ps,
                                   args.limit)
        orules = tree.read_original_rules(open(args.original_rules))
        grammar = read_rule_set(open(args.binarized_rules))
        # for rule in grammar.unary_rules:
        #     print rule
        X, Y = zip(*[(x, y) for x, y in zip(X, Y) if len(x.words) >= 5])
        binarized_Y = [tree.binarize(orules, make_bounds(x.deps), y)[0]
                       for x, y in zip(X, Y)]

        model = train.ReconstructionModel(feature_hash=int(1e7),
                                          joint_feature_format="fast",
                                          joint_feature_cache=False,
                                          part_feature_cache=False)
        model.set_grammar(grammar)
        model.initialize(X, binarized_Y)

    if args.test_load:
        print "LOAD"
        graphs = []
        start = memory()
        for i in range(1000 - 1):
            if len(X[i].words) < 5:
                continue
            x = X[i]
            path = "%s/graphs%s.graph" % (args.store_hypergraph_dir, i)
            encoder = LexicalizedCFGEncoder(x.words, x.tags, grammar)
            pre = memory()
            graph = pydecode.load(path)
            print i, memory() - pre, len(graph.edges), len(X[i].words), \
                memory() - start
            pre = memory()
            encoder.load("%s/encoder%s.pickle" % (args.store_hypergraph_dir, i),
                         graph)
            print i, memory() - pre
            graphs.append((graph, encoder))

    elif args.save_hypergraph:
        print "SAVING"
        import time
        model.set_from_disk(None)
        for i in range(40000):
            if len(X[i].words) < 5:
                continue
            # if len(X[i].words) > 15: continue
            graph, encoder = model.dynamic_program(X[i])
            # Sanity check:
            # print binarized_Y[i]
            # print encoder.structure_path(graph, binarized_Y[i])
            if i % 100 == 0:
                print i
            pydecode.save("%s/graphs%s.graph" % (args.store_hypergraph_dir,
                                                 X[i].index), graph)
            encoder.save("%s/encoder%s.pickle" % (args.store_hypergraph_dir,
                                                  X[i].index), graph)
            del graph
            del encoder

    elif args.oracle:
        print "ORACLE"
        trees_out = open(os.path.join(output_dir, "oracle.txt"), 'w')
        model = train.ReconstructionModel(feature_hash=int(1e7),
                                          part_feature_cache=False,
                                          joint_feature_cache=False,
                                          joint_feature_format="sparse")
        model.set_grammar(grammar)
        model.initialize(X, binarized_Y)
        model.set_from_disk(None)
        X_test, Y_test = train.read_data_set(args.test_file, args.gold_file,
                                             args.test_limit)
        w = np.load(args.model)

        # GOLD TREES
        binarized_Y_test = []
        for x, orig_y in zip(X_test, Y_test):
            y = tree.binarize(orules, orig_y)
            try:
                graph, encoder = model.dynamic_program(x)
                label_values = np.zeros(np.max(graph.labeling) + 1)
                label_values.fill(-1)

                possible = 0
                brackets = set()
                for part in encoder.transform_structure(y):
                    X = grammar.rule_nonterms(part[5])[0]
                    brackets.add((part[0], part[2], X))
                    # print part
                    if tuple(part) in encoder.encoder:
                        label = encoder.encoder[tuple(part)]
                        label_values[label] = 10.0
                        possible += 1
                print "transform"

                graph_labels = graph.labeling[graph.labeling != -1]
                parts = encoder.transform_labels(graph_labels)
                weights = []
                for part in parts:
                    X = grammar.rule_nonterms(part[5])[0]
                    if part[1] != part[2] and X[0] != "Z":
                        if (part[0], part[2], X) in brackets:
                            weights.append(2.0)
                        else:
                            weights.append(-2.0)
                    else:
                        weights.append(0.0)
                label_weights = np.zeros(len(graph.labeling))
                label_weights[graph.labeling != -1] = np.array(weights)

                # graph_labels = graph.labeling[graph.labeling != -1]
                # parts = encoder.transform_labels(graph_labels)
                # parts_features = model.parts_features(x, parts)
                # feature_indices = pydecode.model.sparse_feature_indices(
                #     parts_features, model.temp_shape, model.offsets,
                #     model.feature_hash)
                # # Sum the feature weights for the features in each label row.
                # label_weights = np.zeros(len(graph.labeling))
                # label_weights[graph.labeling != -1] = \
                #     np.sum(np.take(w, feature_indices, mode="clip"), axis=1)

                oracle_weights = pydecode.transform(graph, label_values)
                path = pydecode.best_path(graph, oracle_weights + label_weights)
                print "Match", oracle_weights.T * path.v, possible
                y_hat = encoder.transform_path(path)
                print >>trees_out, tree.remove_head(tree.unbinarize(y_hat)) \
                    .pprint(100000)
            except:
                print >>trees_out, ""
                print "error"
                continue

    elif args.test_file:
        print "TESTING"
        trees_out = open(os.path.join(output_dir, "trees.txt"), 'w')
        model = train.ReconstructionModel(feature_hash=int(1e7),
                                          part_feature_cache=False,
                                          joint_feature_cache=False,
                                          joint_feature_format="sparse")
        model.set_grammar(grammar)
        model.initialize(X, binarized_Y)
        model.set_from_disk(None)
        X_test, Y_test = train.read_data_set(args.test_file, args.gold_file,
                                             args.test_limit)
        w = np.load(args.model)

        # binarized_Y_test = []
        # for i, y in enumerate(Y_test):
        #     print i
        #     binarized_Y_test.append(tree.binarize(orules, y))
        # for x, y in zip(X_test, binarized_Y_test):
        for x in X_test:
            try:
                graph, encoder = model.dynamic_program(x)
                y_hat = model.inference(x, w)
                for part in encoder.transform_structure(y_hat):
                    print part, grammar.rule_nonterms(part[-1]), \
                        model.score_part(x, w, part)
                a = w.T * model.joint_feature(x, y_hat)
                # b = w.T * model.joint_feature(x, y)
                # print a, b
                # if b > a: print "FAIL"
                print
                print tree.remove_head(y_hat)
                print
                print tree.remove_head(tree.unbinarize(y_hat)).pprint()
                # print tree.remove_head(tree.unbinarize(y)).pprint()
                print >>trees_out, tree.remove_head(tree.unbinarize(y_hat)) \
                    .pprint(100000)
            except:
                print "error"
                print >>trees_out, ""

    elif args.run_eval:
        test_file = os.path.join(output_dir, "oracle.txt")
        gold_file = args.gold_file
        print "Evaling", test_file, gold_file
        os.system("../evalb/EVALB/evalb -p ../evalb/EVALB/COLLINS.prm %s %s"
                  % (gold_file, test_file))

    else:
        print "TRAINING"
        model.set_from_disk(args.store_hypergraph_dir)
        sp = StructuredPerceptron(model, verbose=1, max_iter=5, average=False)
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            sp.fit(X, binarized_Y)
        np.save(os.path.join(output_dir, "params"), sp.w)
        w = sp.w

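# One caveat in the experiment flags above: argparse's type=bool just calls
# bool() on the raw string, and bool('False') is True, so '--run_eval False'
# on the command line still enables evaluation. A hedged workaround sketch
# (str2bool is an illustrative helper, not from the source):
def str2bool(value):
    # Treat only an explicit affirmative string as True.
    return value.lower() in ('1', 'true', 'yes')

# e.g. parser.add_argument('--run_eval', type=str2bool, default=False)
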
import argparse
import json
import logging
import os
import re
import sys
from os.path import expanduser

import argparse_config
from jinja2 import Template

# `SmartFormatter`, `perform_API_request` and `__version__` are
# module-level names from the surrounding project.


def run():
    loglevel = "INFO"
    home = expanduser("~")
    prog = os.path.basename(__file__)
    cfn = home + '/' + '.' + os.path.splitext(prog)[0] + '.conf'
    def_url = 'http://localhost/MAAS/api/1.0'
    def_key = 'null'
    def_command = '/nodes/?op=list'

    p = argparse.ArgumentParser(description="MaaS utility cli",
                                formatter_class=SmartFormatter)

    # pick up the maas related arguments
    p.add_argument('-u', '--url', action='store', dest='url', default=def_url,
                   help='This is the maas url to connect to, default : ' + def_url)
    p.add_argument('-k', '--key', action='store', dest='key', default=def_key,
                   help='This is the maas admin api key, default : ' + def_key)
    p.add_argument('-f', '--file', action='store', dest='filename',
                   help='This is the jinja2 template file')
    p.add_argument('-t', '--template', action='store', dest='template',
                   help='This is the template text on the command line, or use the word RAW for raw json output')
    p.add_argument('--list', action='store_true', dest='template_list',
                   help='Use this template shortcut to create --list output for ansible (dynamic inventory)')
    p.add_argument('-c', '--command', action='store', dest='command',
                   default=def_command,
                   help='This is the maas uri, e.g. /nodes/?op=list, default : ' + def_command)

    # non application related stuff
    p.add_argument('-v', '--version', action='store_true', dest='version',
                   default=False,
                   help='this switch will just return the version and exit, current version is : ' + __version__)
    p.add_argument('-l', '--loglevel', action='store', dest='loglevel',
                   default=loglevel,
                   choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   help='Log level (DEBUG,INFO,WARNING,ERROR,CRITICAL) default is: ' + loglevel)

    # this argument saves the current argument list in a .maasutil.conf
    # file in the current user's home directory. This does put an api key
    # in that file, so in a secure environment this shouldn't be used.
    p.add_argument('-s', '--save', action='store_true', dest='save',
                   default=False,
                   help='save select command line arguments (default is never) in "' + cfn + '" file')

    # read in defaults from ~/.PROGBASENAMENOSUFFIX if the file exists
    if os.path.isfile(cfn):
        argparse_config.read_config_file(p, cfn)

    # parse arguments (after reading defaults from the ~/. dot file)
    args = p.parse_args()
    if args.loglevel:
        loglevel = args.loglevel

    # set our logging level (from -l INFO (or whatever))
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % loglevel)
    logging.basicConfig(level=numeric_level)

    if args.version:
        print __version__
        sys.exit(0)

    # save to the defaults file if a -s specified on command line
    if args.save:
        logging.info('Saving arguments to :%s', cfn)
        # strip the one-shot 'save' key so it is not re-applied the next
        # time the defaults file is read back in
        apc = re.sub('\nsave\n', '\n',
                     argparse_config.generate_config(p, args, section='default'))
        with open(cfn, 'w') as f:
            f.write(apc)

    # This logic requires a -f or -t argument; if both are specified a
    # warning is issued, if neither we exit. Otherwise, we load
    # template_text with the template from either a file or the command
    # line.
    if args.template_list:
        args.template = '''
{#
## t-nodeslist - template to produce a text list of nodes
##
## src = /nodes/?op=list
##
## output
##    hostname,system_id,status,textstatus
##
#}
{
{# a goofy way to come up with a unique list of tags #}
{%- set gname = [] -%}
{%- for h in src -%}
  {%- for i in h.tag_names -%}
    {%- if not i in gname -%}
      {%- set _ = gname.append(i) -%}
    {%- endif -%}
  {%- endfor -%}
{%- endfor -%}
{%- for i in gname -%}
"{{i}}": {
    "hosts" : [
    {%- set lcomma = '' -%}
    {%- for h in src -%}
      {%- if i in h.tag_names -%}
        {{lcomma}}"{{ h.hostname -}}"
        {%- set lcomma = ',' -%}
      {%- endif -%}
    {%- endfor -%}
    ]
},
{% endfor %}
"_meta": {
  {% set hcomma='' %}
  "hostvars" : {
  {%- for h in src -%}
    {{hcomma}}"{{ h.hostname -}}" : {
      "system_id": "{{ h.system_id }}",
      "status": {{ h.status }},
      "status_text":
      {%- if h.status == 1 -%} "commissioning"
      {%- elif h.status == 2 -%} "2"
      {%- elif h.status == 3 -%} "3"
      {%- elif h.status == 4 -%} "ready"
      {%- elif h.status == 5 -%} "5"
      {%- elif h.status == 6 -%} "deployed"
      {%- else -%} "unknown"
      {%- endif %}
    }
    {% set hcomma=',' %}
  {% endfor -%}
  }
}
}
'''

    if not args.filename and not args.template:
        raise RuntimeError('Must supply either -f templatefile or -t templatetext')
    if args.filename and args.template:
        logging.warning('BOTH -f and -t specified, -t will override -f!!')

    template_text = ''
    if args.template:
        template_text = args.template
        logging.debug("Command line template text: \n%s\n", template_text)
    else:
        try:
            template_file = open(args.filename, 'r')
            template_text = template_file.read()
            logging.debug("Template file (%s): \n%s\n", args.filename,
                          template_text)
        except Exception as e:
            logging.fatal('Error with template file %s:[%s]', args.filename,
                          str(e))

    logging.info('Program starting :%s', prog)
    logging.debug('Arg: loglevel :%s', loglevel)
    logging.debug('Arg: url      :%s', args.url)
    logging.debug('Arg: filename :%s', args.filename)
    logging.debug('Arg: template :%s', args.template)
    logging.debug('Arg: command  :%s', args.command)
    logging.debug('Arg: save     :%s', args.save)

    # rd will contain the decoded response; the api key is expected to be
    # 'consumer:token:secret'
    kp = args.key.split(':')
    response = perform_API_request(args.url, args.command, 'GET',
                                   kp[1], kp[2], kp[0])
    logging.debug("response header  :%s", response[0])
    logging.debug("response content :%s", response[1])
    try:
        rd = json.loads(response[1])
    except:
        rd = []

    if template_text == 'RAW':
        print json.dumps(rd)
    else:
        td = Template(template_text)
        tr = td.render(src=rd)
        print tr,

    sys.exit(0)

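# The template machinery above boils down to one step: bind the decoded
# MAAS JSON to `src` and render it through jinja2. A minimal hedged sketch
# with inline sample data (the hostname and status values are made up):
import json

from jinja2 import Template

src = json.loads('[{"hostname": "node1", "status": 4, "tag_names": []}]')
template = Template('{% for h in src %}{{ h.hostname }}:{{ h.status }}{% endfor %}')
print(template.render(src=src))  # node1:4
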