def vrocli(verbose=False):
    """vRealize Automation coder/command line interface."""
    # Single conditional expression: DEBUG when verbose, INFO otherwise.
    logger.setLevel(logging.DEBUG if verbose else logging.INFO)
def main(_):
    """Recreate a clean log directory, optionally raise verbosity, and run training."""
    log_dir = FLAGS.log_dir
    # Start from an empty log directory every run.
    if tf.gfile.Exists(log_dir):
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)
    if FLAGS.debug:
        logger.setLevel(logging.DEBUG)
    run_training(FLAGS.debug)
def main():
    """CLI entry point: parse arguments, set the log level, dispatch to the sub-command.

    Verbosity: one -v selects INFO, two or more select DEBUG.
    """
    # parse cli args
    args = parse_args(sys.argv[1:])
    # set log level
    # Fixed: `args.verbose is 1` compared identity, not value -- it only worked
    # because CPython caches small ints, and it is a SyntaxWarning on 3.8+.
    if args.verbose == 1:
        logger.setLevel(logging.INFO)
    elif args.verbose > 1:
        logger.setLevel(logging.DEBUG)
    # dispatch to the handler the sub-parser attached to this namespace
    args.func(args)
def init_inference_session(args):
    """Build an aclruntime InferenceSession configured from parsed CLI args.

    Args:
        args: namespace providing model, device_id, acl_json_path and debug.

    Returns:
        A configured aclruntime.InferenceSession.
    """
    options = aclruntime.session_options()
    # Fixed: identity check `is not None` replaces the `!= None` anti-idiom.
    if args.acl_json_path is not None:
        options.acl_json_path = args.acl_json_path
    # Fixed: plain truthiness replaces `== True`.
    if args.debug:
        logger.setLevel(logging.DEBUG)
        options.log_level = 1
    session = aclruntime.InferenceSession(args.model, args.device_id, options)
    set_session_options(session, args)
    # Lazy %-formatting: the message is only built when DEBUG is enabled.
    logger.debug("session info:%s", session)
    return session
def main():
    """Configure logging, start the birthday-greeting thread, and poll Telegram."""
    cherrypy.config.update({
        'log.screen': True,
        'log.access_file': '',
        'log.error_file': ''
    })
    telebot.logger.setLevel(logging.INFO)
    logger.setLevel(logging.INFO)

    # One formatter shared by a console handler and a UTF-8 file handler.
    formatter = logging.Formatter(u'[%(asctime)s]: %(message)s')
    stream_handler = logging.StreamHandler()
    file_handler = logging.FileHandler('alphabot.log', encoding='utf-8')
    for handler in (stream_handler, file_handler):
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    # Automatic posting of birthday greetings in chat (daemon so it dies with us).
    birthday_thread = threading.Thread(target=birthday_method)
    birthday_thread.daemon = True
    birthday_thread.start()

    bot.set_update_listener(listener)
    telegram_polling()
class Parameters:
    """Application configuration loaded from the default JSON config file.

    All attributes are evaluated at class-definition time; a missing or
    invalid config file logs a critical message and terminates with exit(1).
    """
    program_dir = Path(abspath(__file__)).parents[1]
    config_dir = 'config'
    config_name = 'default_config.json'
    config_path = join(program_dir, config_dir, config_name)
    # try to load the config file
    try:
        # load json file here
        config_dict = load_json_file(config_path)
    except FileNotFoundError:
        logger.critical(f'config file was not found at: {config_path}')
        exit(1)
    except Exception as e:
        logger.critical(f'failed to load config file: {e}')
        exit(1)
    if is_valid_config(config_dict):
        # set all parameters here
        root_dir = Path(abspath(__file__)).parents[1]
        whiskybroker_base_url = config_dict['whiskybroker base url']
        email_address = config_dict['email address']
        data_dir = join(root_dir, config_dict['data dir'])
        logging_level, status = get_logging_level_from_parameter(
            config_dict['general logging level'])
        # TODO: maybe change the below logging to debug
        # Fixed: was `logging.info(...)` (root logger) -- every other call in
        # this class goes through the module `logger`, so be consistent.
        logger.info(f'logging level parsing status: {status}')
        logger.info(f'setting general logging level to: {logging_level}')
        logger.setLevel(logging_level)
    else:
        # TODO: check why the config dict was not valid, and return any missing keys/wrong values
        # Fixed: dropped a pointless f-prefix on a placeholder-free message.
        logger.critical('config file contained something wrong...')
        exit(1)
def main(argv=None):
    # Entry point of a Python 2 VM-management CLI: parse options, connect to the
    # cloud, and dispatch on opts.cmd. Returns 0 on success, 1 on error.
    opts = create_parser().parse_args(argv)
    opts.users = None
    # Apply the requested level to both the logger and its handler.
    logger.setLevel(getattr(logging, opts.loglevel))
    logger_handler.setLevel(getattr(logging, opts.loglevel))
    try:
        cloud = cloud_connect(opts.config)
        if opts.cmd == 'vms':
            # List known VM names, sorted.
            print "\n".join(sorted(cloud))
        else:
            if opts.cmd == 'start':
                for name in opts.vmnames:
                    cloud.start_vm(name, opts.users, opts.prepare)
            elif opts.cmd == 'stop':
                for name in opts.vmnames:
                    cloud.stop_vm(name, timeout1=opts.wait_time)
            elif opts.cmd == 'login':
                # login works on exactly one VM
                assert len(opts.vmnames) == 1
                cloud.login_to_vm(opts.vmnames[0], opts.users)
            elif opts.cmd == 'list':
                for domain in cloud.list_vms():
                    try:
                        all_ips = ", ".join(cloud.get_vm_ips(domain.name()))
                    except socket.error as err:
                        # EPERM presumably means arp-scan lacks privileges;
                        # any other socket error is re-raised.
                        if err.errno != errno.EPERM:
                            raise
                        all_ips = "Not enought permissions for arp-scan"
                    print "{0:>5} {1:<15} => {2}".format(
                        domain.ID(), domain.name(), all_ips)
            elif opts.cmd == 'wait_ip':
                # Poll each VM until it reports an IP or the deadline passes.
                tend = time.time() + opts.wait_time
                for vmname in opts.vmnames:
                    while True:
                        try:
                            ips = list(cloud.get_vm_ips(vmname))
                        except socket.error as err:
                            if err.errno != errno.EPERM:
                                raise
                            print "Not enought permissions for arp-scan"
                            return 1
                        if len(ips) != 0:
                            print "{0:<15} => {1}".format(
                                vmname, " ".join(ips))
                            break
                        if time.time() >= tend:
                            print "VM {0} don't get ip in time".format(vmname)
                            return 1
                        time.sleep(0.01)
            elif opts.cmd == 'wait_ssh':
                # Same polling loop, but waits for an ssh-reachable IP.
                tend = time.time() + opts.wait_time
                for vmname in opts.vmnames:
                    while True:
                        try:
                            ip = cloud.get_vm_ssh_ip(vmname)
                        except socket.error as err:
                            if err.errno != errno.EPERM:
                                raise
                            print "Not enought permissions for arp-scan"
                            return 1
                        if ip is not None:
                            print "{0:<15} => {1}".format(vmname, ip)
                            break
                        if time.time() >= tend:
                            templ = "VM {0} don't start ssh server in time"
                            print templ.format(vmname)
                            return 1
                        time.sleep(0.01)
            else:
                print >> sys.stderr, "Error : Unknown cmd {0}".format(opts.cmd)
    except CloudError as err:
        print >> sys.stderr, err
        return 1
    return 0
def model_test(X, Y, method): amount = 0 times = 0 for cv_data, cv_target, test_data, test_target in SAMPLE.iter_sample_data( X, Y, method): times += 1 model = LogisticRegression(delta=0.01, alpha=0.01) model.fit(cv_data, cv_target) predict_y = model.predict(test_data) amount += FORMULA.cal(predict_y, test_target) return float(amount) / times if __name__ == '__main__': filename = "../data/iris.csv" X, Y = FU.load_iris_data(filename) X = StandardScaler().fit_transform(X) X = FORMULA.plus_one(X) LOGGER.setLevel(LOGGER.LEVEL_NORMAL) print u"10折交叉法:", model_test(X, Y, 10) print u"留一法:", model_test(X, Y, 1) # model.draw_data(X, Y) # model.draw_line(X) # model.draw_loss() # plt.show()
    # Tail of a per-band depth function whose header lies outside this chunk:
    # store the HEALPix pixels and magnitude limit for this band, then return.
    ret[band] = [hpx,maglim]
    return ret


if __name__ == "__main__":
    import argparse
    description = __doc__
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('config',nargs='?')
    parser.add_argument('-n','--nside',default=NSIDE,type=int)
    parser.add_argument('-v','--verbose',action='store_true')
    parser.add_argument('-s','--survey',default='des')
    args = parser.parse_args()

    if args.verbose:
        # NOTE(review): `logger.INFO` looks suspect -- the stdlib constant is
        # logging.INFO. Confirm the project logger really exposes an INFO
        # attribute; otherwise this raises AttributeError under -v.
        logger.setLevel(logger.INFO)

    if args.config:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted files and deprecated in PyYAML >= 5.1; consider safe_load.
        config = yaml.load(open(args.config))
        BANDS = config['bands']

    NSIDE = args.nside
    outdir = mkdir('release/depth')
    # One catalog file per HEALPix pixel.
    infiles = sorted(glob.glob('cat/cat_hpx_*.fits'))

    # Fan the depth calculation out over 20 worker processes.
    p = Pool(maxtasksperchild=1,processes=20)
    out = p.map(depth,infiles)

    skymaps = dict()
import os
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn_cell, rnn
from tensorflow.python.platform import gfile
from utils import r2_score, mean_squared_error
from utils import logger, Print, PrintMess

# Silence the project logger below CRITICAL.
logger.setLevel("CRITICAL")
# NOTE(review): logging.disable expects a numeric level; passing the string
# "INFO" relies on newer stdlib versions accepting level names -- confirm
# against the targeted Python version.
logging.disable("INFO")

# Command-line flags for the polynomial-task RNN experiment.
tf.app.flags.DEFINE_boolean("print_twice", False, "Work interactively or not?")
tf.app.flags.DEFINE_float("learning_rate", 0.001, "Learning Rate")
tf.app.flags.DEFINE_float("max_grad_norm", 1., "Clipping gradient norm")
tf.app.flags.DEFINE_float("init_scale", .1, "Norm of initial weights")
tf.app.flags.DEFINE_integer("hidden_dim", 100, "hidden size of Neural net")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in neural net")
tf.app.flags.DEFINE_integer("input_dim", 1, "Dimension of the target")
tf.app.flags.DEFINE_integer("output_dim", 1, "Dimension of the target")
tf.app.flags.DEFINE_integer("batch_size", 128, "Batch Size for SGD")
tf.app.flags.DEFINE_integer("n_iter", 100000, "Number of Iteration")
tf.app.flags.DEFINE_integer("n_valid", 5000, "Number of obs for valid set")
tf.app.flags.DEFINE_integer("freq_mess", 100, "Print a message every ... iter")
tf.app.flags.DEFINE_integer("seed", 2, "Random Number Seed")
tf.app.flags.DEFINE_string("task", "poly_eval",
                           """Choose the task from poly_eval, poly_der_eval,
                           and poly_div, newton_eval""")
tf.app.flags.DEFINE_string("train_degrees", "5,10,15", "The degrees to train")
tf.app.flags.DEFINE_string("valid_degrees", "20", "The single degree to valid")
def main(argv=None):
    # Entry point of a Python 2 VM-management CLI: parse options, connect to the
    # cloud, and dispatch on opts.cmd. Returns 0 on success, 1 on error.
    opts = create_parser().parse_args(argv)
    opts.users = None
    # Apply the requested level to both the logger and its handler.
    logger.setLevel(getattr(logging, opts.loglevel))
    logger_handler.setLevel(getattr(logging, opts.loglevel))
    try:
        cloud = cloud_connect(opts.config)
        if opts.cmd == 'vms':
            # List known VM names, sorted.
            print "\n".join(sorted(cloud))
        else:
            if opts.cmd == 'start':
                for name in opts.vmnames:
                    cloud.start_vm(name, opts.users, opts.prepare)
            elif opts.cmd == 'stop':
                for name in opts.vmnames:
                    cloud.stop_vm(name, timeout1=opts.wait_time)
            elif opts.cmd == 'login':
                # login works on exactly one VM
                assert len(opts.vmnames) == 1
                cloud.login_to_vm(opts.vmnames[0], opts.users)
            elif opts.cmd == 'list':
                for domain in cloud.list_vms():
                    try:
                        all_ips = ", ".join(cloud.get_vm_ips(domain.name()))
                    except socket.error as err:
                        # EPERM presumably means arp-scan lacks privileges;
                        # any other socket error is re-raised.
                        if err.errno != errno.EPERM:
                            raise
                        all_ips = "Not enought permissions for arp-scan"
                    print "{0:>5} {1:<15} => {2}".format(domain.ID(),
                                                         domain.name(),
                                                         all_ips)
            elif opts.cmd == 'wait_ip':
                # Poll each VM until it reports an IP or the deadline passes.
                tend = time.time() + opts.wait_time
                for vmname in opts.vmnames:
                    while True:
                        try:
                            ips = list(cloud.get_vm_ips(vmname))
                        except socket.error as err:
                            if err.errno != errno.EPERM:
                                raise
                            print "Not enought permissions for arp-scan"
                            return 1
                        if len(ips) != 0:
                            print "{0:<15} => {1}".format(vmname,
                                                          " ".join(ips))
                            break
                        if time.time() >= tend:
                            print "VM {0} don't get ip in time".format(vmname)
                            return 1
                        time.sleep(0.01)
            elif opts.cmd == 'wait_ssh':
                # Same polling loop, but waits for an ssh-reachable IP.
                tend = time.time() + opts.wait_time
                for vmname in opts.vmnames:
                    while True:
                        try:
                            ip = cloud.get_vm_ssh_ip(vmname)
                        except socket.error as err:
                            if err.errno != errno.EPERM:
                                raise
                            print "Not enought permissions for arp-scan"
                            return 1
                        if ip is not None:
                            print "{0:<15} => {1}".format(vmname, ip)
                            break
                        if time.time() >= tend:
                            templ = "VM {0} don't start ssh server in time"
                            print templ.format(vmname)
                            return 1
                        time.sleep(0.01)
            else:
                print >>sys.stderr, "Error : Unknown cmd {0}".format(opts.cmd)
    except CloudError as err:
        print >>sys.stderr, err
        return 1
    return 0
import os
import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn_cell, rnn
from tensorflow.python.platform import gfile
from utils import r2_score, mean_squared_error
from utils import logger, Print, PrintMess

# Silence the project logger below CRITICAL.
logger.setLevel("CRITICAL")
# NOTE(review): logging.disable expects a numeric level; passing the string
# "INFO" relies on newer stdlib versions accepting level names -- confirm
# against the targeted Python version.
logging.disable("INFO")

# Command-line flags for the polynomial-task RNN experiment.
tf.app.flags.DEFINE_boolean("print_twice", False, "Work interactively or not?")
tf.app.flags.DEFINE_float("learning_rate", 0.001, "Learning Rate")
tf.app.flags.DEFINE_float("max_grad_norm", 1., "Clipping gradient norm")
tf.app.flags.DEFINE_float("init_scale", .1, "Norm of initial weights")
tf.app.flags.DEFINE_integer("hidden_dim", 100, "hidden size of Neural net")
tf.app.flags.DEFINE_integer("num_layers", 2, "Number of layers in neural net")
tf.app.flags.DEFINE_integer("input_dim", 1, "Dimension of the target")
tf.app.flags.DEFINE_integer("output_dim", 1, "Dimension of the target")
tf.app.flags.DEFINE_integer("batch_size", 128, "Batch Size for SGD")
tf.app.flags.DEFINE_integer("n_iter", 100000, "Number of Iteration")
tf.app.flags.DEFINE_integer("n_valid", 5000, "Number of obs for valid set")
tf.app.flags.DEFINE_integer("freq_mess", 100, "Print a message every ... iter")
tf.app.flags.DEFINE_integer("seed", 2, "Random Number Seed")
tf.app.flags.DEFINE_string(
    "task", "poly_eval", """Choose the task from poly_eval, poly_der_eval,
    and poly_div, newton_eval""")
tf.app.flags.DEFINE_string("train_degrees", "5,10,15", "The degrees to train")
    # Continuation of a parser.add_argument(...) call whose opening line lies
    # outside this chunk (presumably adding the -v flag).
    '--verbose', action='count', help='Increase verbosity')
parser.add_argument('-l', '--limit', type=int, default=10,
                    help='Number of results')
parser.add_argument('-u', '--url',
                    default='https://news.ycombinator.com/rss',
                    help='Starting page')
args = parser.parse_args()

if args.verbose:
    # One -v raises the level to INFO, two or more to DEBUG.
    logger.setLevel('DEBUG' if args.verbose > 1 else 'INFO')

db = DB('db')
# First positional token selects the action; the rest are its arguments.
action, *extra = args.action
if action == 'complete':
    for item in extra:
        for suggestion in db.complete(item):
            print(item, suggestion)
elif action == 'search':
    for doc in db.search(extra):
        print(doc)
elif action == 'crawl':
    crawl(db, args.url)
elif action == 'insert':
    fragments = TextParser.get_text(args.url)
    db.insert(args.url, list(fragments))