Code example #1
def configure_logging():
    logging.basicConfig(
        filename='mv_gp_log_{:%Y%m%d_%H%M%S}.txt'.format(datetime.now()),
        level=logging.DEBUG,
        format='%(asctime)s: %(levelname)7s: [%(name)s]: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
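Note: the snippet above assumes import logging and from datetime import datetime at module level, which the excerpt omits. A minimal, self-contained sketch of the same setup with a hypothetical call site:

import logging
from datetime import datetime

def configure_logging():
    # DEBUG and above go to a file named after the start time,
    # e.g. mv_gp_log_20240101_120000.txt
    logging.basicConfig(
        filename='mv_gp_log_{:%Y%m%d_%H%M%S}.txt'.format(datetime.now()),
        level=logging.DEBUG,
        format='%(asctime)s: %(levelname)7s: [%(name)s]: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

if __name__ == '__main__':
    configure_logging()
    logging.getLogger(__name__).debug('logging configured')  # hypothetical call site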
Code example #2
File: test_inline.py  Project: anirudhreddy92/mrjob
    def test_cmdenv(self):
        import logging
        logging.basicConfig()
        # make sure previous environment is preserved
        os.environ['SOMETHING'] = 'foofoofoo'
        old_env = os.environ.copy()

        mr_job = MRTestCmdenv(['--runner', 'inline', '--cmdenv=FOO=bar'])
        mr_job.sandbox(stdin=BytesIO(b'foo\n'))

        results = []

        with mr_job.make_runner() as runner:
            assert isinstance(runner, InlineMRJobRunner)
            runner.run()

            for line in runner.stream_output():
                key, value = mr_job.parse_output_line(line)
                results.append((key, value))

        self.assertEqual(sorted(results),
                         [('FOO', 'bar'), ('SOMETHING', 'foofoofoo')])

        # make sure we revert back
        self.assertEqual(old_env, os.environ)
Code example #3
File: ossipee.py  Project: jdennis/ossipee
def main():
    args_file = sys.argv[1]
    args_data = file(args_file).read()
    arguments = shlex.split(args_data)
    worker = 'all'
    action = WorkItemList.display

    for arg in arguments:
        # ignore any arguments without an equals in it
        if '=' in arg:
            (key, value) = arg.split('=')
            if key == 'worker':
                worker = workers[value]
            if key == 'action':
                if value == 'create':
                    action = WorkItemList.create
                elif value == 'teardown':
                    action = WorkItemList.teardown
                elif value == 'display':
                    action = WorkItemList.display

    logging.basicConfig(level=logging.ERROR)

    action(worker)
    print json.dumps({
        'success': True,
        'args': args_data
    })
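Note: example #3 is Python 2 only (the file() builtin and the print statement). A small sketch, under the assumption that only those constructs need porting, of the Python 3 equivalents around the same basicConfig call:

import json
import logging
import sys

logging.basicConfig(level=logging.ERROR)

args_file = sys.argv[1]
with open(args_file) as f:       # open() replaces the Python 2 file() builtin
    args_data = f.read()

print(json.dumps({               # print is a function in Python 3
    'success': True,
    'args': args_data
}))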
Code example #4
File: deploy.27.py  Project: pabvenegas/general
def main():

    parser = argparse.ArgumentParser(description='Deploy interface.')
    parser.add_argument('--version', action='version', version=APP + " " + VERSION)
    parser.add_argument('--logging', dest='log_level', action='store',
                        default='DEBUG', choices=['DEBUG', 'INFO'],
                        help='Minimum level of logging message to show. Default (DEBUG)')

    subparsers = parser.add_subparsers(dest='cmd')

    parser_a = subparsers.add_parser('install',
                                     help='Run install')
    parser_a.set_defaults(func=install)

    parser_a = subparsers.add_parser('uninstall',
                                     help='Run uninstall')
    parser_a.set_defaults(func=uninstall)

    if len(sys.argv) == 1:
        error_msg = "ERROR: No arguments supplied!"
        print >> sys.stderr, error_msg
        parser.print_help()
        sys.exit(1)

    args = parser.parse_args()

    logging.basicConfig(level=args.log_level,
                        format=LOGGING_FORMAT)

    args.func(args)
Code example #5
File: cli.py  Project: ghostRider1124/udiskie
    def __init__(self, argv=None):
        """
        Parse command line options, read config and initialize members.

        :param list argv: command line parameters
        """
        # parse program options (retrieve log level and config file name):
        args = docopt(self.usage, version=self.name + ' ' + self.version)
        default_opts = self.option_defaults
        program_opts = self.program_options(args)
        # initialize logging configuration:
        log_level = program_opts.get('log_level', default_opts['log_level'])
        if log_level <= logging.DEBUG:
            fmt = _('%(levelname)s [%(asctime)s] %(name)s: %(message)s')
        else:
            fmt = _('%(message)s')
        logging.basicConfig(level=log_level, format=fmt)
        # parse config options
        config_file = OptionalValue('--config')(args)
        config = udiskie.config.Config.from_file(config_file)
        options = {}
        options.update(default_opts)
        options.update(config.program_options)
        options.update(program_opts)
        # initialize instance variables
        self.config = config
        self.options = options
        self._init(config, options)
Code example #6
File: deploy.py  Project: adityaathalye/Ally-Py
def deploy():
    assert isinstance(application.options, OptionsCore), 'Invalid application options %s' % application.options
    if not application.options.start: return
    try:
        if not os.path.isfile(application.options.configurationPath):
            print('The configuration file "%s" doesn\'t exist, create one by running the application '
                  'with "-dump" option' % application.options.configurationPath, file=sys.stderr)
            sys.exit(1)
        with open(application.options.configurationPath, 'r') as f: config = load(f)

        assembly = application.assembly = ioc.open(aop.modulesIn('__setup__.**'), config=config)
        assert isinstance(assembly, Assembly), 'Invalid assembly %s' % assembly
        
        import logging
        logging.basicConfig(format=format())
        for name in warning_for(): logging.getLogger(name).setLevel(logging.WARN)
        for name in info_for(): logging.getLogger(name).setLevel(logging.INFO)
        for name in debug_for(): logging.getLogger(name).setLevel(logging.DEBUG)
        
        try: assembly.processStart()
        finally: ioc.deactivate()
    except SystemExit: raise
    except (SetupError, ConfigError):
        print('-' * 150, file=sys.stderr)
        print('A setup or configuration error occurred while deploying, try to rebuild the application properties by '
              'running the application with "configure components" options', file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        print('-' * 150, file=sys.stderr)
    except:
        print('-' * 150, file=sys.stderr)
        print('A problem occurred while deploying', file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        print('-' * 150, file=sys.stderr)
Code example #7
File: download.py  Project: hatate/4chan-downloader
def main():
	parser = argparse.ArgumentParser(description='inb4404')
	parser.add_argument('thread', nargs=1, help='url of the thread')
	args = parser.parse_args()

	logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt='%I:%M:%S %p')

	workpath = os.path.dirname(os.path.realpath(__file__))
	board = ''.join(args.thread).split('/')[3]
	thread = ''.join(args.thread).split('/')[5].split('#')[0]

	directory = os.path.join(workpath, 'downloads', board, thread)
	if not os.path.exists(directory):
		os.makedirs(directory)

	os.chdir(directory)

	while len(args.thread):
		for t in args.thread:
			try:
				for link, img in re.findall('(\/\/i.4cdn.org/\w+\/(\d+\.(?:jpg|png|gif|webm)))', load(t)):
					if not os.path.exists(img):
						log.info(img)
						data = load('https:' + link)
						with open(img, 'w') as f:
							f.write(data)
			except urllib2.HTTPError, err:
				log.info('%s 404\'d', t)
				args.thread.remove(t)
				continue
			except (urllib2.URLError, httplib.BadStatusLine, httplib.IncompleteRead):
				log.warning('something went wrong')
Code example #8
def script_to_py3(script):
    """Convert a script to Python3 syntax if required."""
    if sys.version_info[0] < 3:
        return script

    import tempfile
    f = tempfile.NamedTemporaryFile(suffix=".py", delete=False)
    f.write(script.encode())
    f.flush()
    filename = f.name
    f.close()

    # 2to3 is way too chatty
    import logging
    logging.basicConfig(filename=os.devnull)

    from lib2to3.main import main
    if main("lib2to3.fixes", ['--no-diffs', '-w', '-n', filename]):
        raise Exception('py3 conversion failed')

    f2 = open(filename)
    try:
        return f2.read()
    finally:
        f2.close()
        os.remove(filename)
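Note: a hypothetical way to exercise script_to_py3 (assuming the function above is importable, and that os and sys are imported at module level, which the excerpt omits). Pointing basicConfig at os.devnull is what keeps lib2to3's log output quiet:

import os
import sys

legacy = "print 'hello from python 2'\n"
converted = script_to_py3(legacy)   # returned unchanged on Python 2, converted via lib2to3 on Python 3
print(converted)                    # expected on Python 3: print('hello from python 2')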
Code example #9
def main():
  SRC_DEFAULT = '[emoji]/build/compressed_pngs'
  PREFIX_DEFAULT = 'android_'

  parser = argparse.ArgumentParser()
  parser.add_argument(
      '-s', '--src_dir', help='source images (default \'%s\')' % SRC_DEFAULT,
      default=SRC_DEFAULT, metavar='dir')
  parser.add_argument(
      '-d', '--dst_dir', help='destination directory', metavar='dir',
      required=True)
  parser.add_argument(
      '-p', '--prefix', help='prefix for thumbnail (default \'%s\')' %
      PREFIX_DEFAULT, default=PREFIX_DEFAULT, metavar='str')
  parser.add_argument(
      '-c', '--crop', help='crop images (will automatically crop if '
      'src dir is the default)', action='store_true')
  parser.add_argument(
      '-v', '--verbose', help='write log output', metavar='level',
      choices='warning info debug'.split(), const='info',
      nargs='?')
  args = parser.parse_args()

  if args.verbose is not None:
    logging.basicConfig(level=getattr(logging, args.verbose.upper()))

  crop = args.crop or (args.src_dir == SRC_DEFAULT)
  create_thumbnails_and_aliases(
      args.src_dir, args.dst_dir, crop, args.prefix)
Code example #10
def parse_args():
    """Parse arguments and sets up logging verbosity.

    :rtype: normal options and arguments as tuple.
    """
    parser = optparse.OptionParser(__doc__)
    parser.add_option("-f", "--file", dest="filename",
        help="setting file", metavar="FILE")
    parser.add_option("-o", "--output", dest="output",
        help="output file", metavar="FILE")
    parser.add_option("-n", "--dryrun", dest="dryrun",
        help="dry run", default=False, action="store_true")
    parser.add_option("-v", "--verbose", dest="verbose",
        default=False, action="store_true", help="verbose mode")
    parser.add_option("-q", "--quiet", dest="quiet",
        default=False, action="store_true", help="quiet mode")

    opts, args = parser.parse_args()

    if opts.verbose:
        logging.basicConfig(level=logging.DEBUG)
    elif not opts.quiet:
        logging.basicConfig(level=logging.INFO)

    return opts, args
Code example #11
File: parsemail.py  Project: joselamego/patchwork
def main(args):
    django.setup()
    logger = setup_error_handler()
    parser = argparse.ArgumentParser()
    parse_lock = None

    def list_logging_levels():
        """Give a summary of all available logging levels."""
        return sorted(list(VERBOSITY_LEVELS.keys()),
                      key=lambda x: VERBOSITY_LEVELS[x])

    parser.add_argument('--verbosity', choices=list_logging_levels(),
                        help='logging level', default='info')

    args = vars(parser.parse_args())

    logging.basicConfig(level=VERBOSITY_LEVELS[args['verbosity']])

    mail = message_from_file(sys.stdin)
    try:
        parse_lock = lock()
        return parse_mail(mail)
    except:
        if logger:
            logger.exception('Error when parsing incoming email', extra={
                'mail': mail.as_string(),
            })
        raise
    finally:
        release(parse_lock)
Code example #12
File: EncodingDecoding.py  Project: kr-manish/mani
def main():

    # Get all the arguments
    args = argument_parser()

    # Check the verbosity level
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG, format='%(funcName)s:%(levelname)s:%(message)s')
    else:
        logging.basicConfig(level=logging.INFO, format='%(funcName)s:%(levelname)s:%(message)s')

    method = args.method
    action = 'encode' if args.encode else 'decode'
    data = args.encode if action=='encode' else args.decode

    logging.debug("{} this {} string using {}".format(action, data, method))

    if method == 'base64':
        base64_encode_decode(action, data)

    if method == 'caeser':
        key = args.key
        if key >=0 and key < 27:
            caeser_cypher(key, action, data)
        else:
            logging.error("Key should be in the range 0-26")
Code example #13
def get_parameters():
	global host
	global port
	global thr
	global item
	optp = OptionParser(add_help_option=False,epilog="Hammers")
	optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
	optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
	optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
	optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
	optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
	opts, args = optp.parse_args()
	logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
	if opts.help:
		usage()
	if opts.host is not None:
		host = opts.host
	else:
		usage()
	if opts.port is None:
		port = 80
	else:
		port = opts.port
	if opts.turbo is None:
		thr = 135
	else:
		thr = opts.turbo
Code example #14
File: devcron.py  Project: dbenamy/devcron
def main():
    prog = 'devcron.py'
    usage = 'usage: %prog [options] crontab'
    description = 'A development cron daemon. See README.md for more info.'

    op = optparse.OptionParser(prog=prog, usage=usage, description=description)
    op.add_option('-v', '--verbose', dest='verbose', action='store_true',
                  help='verbose logging.')

    (options, args) = op.parse_args()

    if len(args) != 1:
        op.print_help()
        sys.exit(1)

    log_level = logging.WARN
    if options.verbose:
        log_level = logging.DEBUG

    logging.basicConfig(level=log_level)

    crontab_data = open(args[0]).read()
    crontab_data = fold_crontab_lines(crontab_data)
    crontab_data = edit_crontab_data(crontab_data)
    logging.debug("Edited crontab looks like:\n%s\n" % crontab_data)
    events = parse_crontab(crontab_data)
    logging.debug("Parsed crontab as:\n%s\n" %
                  '\n'.join([str(e) for e in events]))
    cron = Cron(events)
    cron.run()
Code example #15
File: run.py  Project: dynaryu/oq-risklib
def run(job_ini, concurrent_tasks=None,
        loglevel='info', hc=None, exports=''):
    """
    Run a calculation. Optionally, set the number of concurrent_tasks
    (0 to disable the parallelization).
    """
    logging.basicConfig(level=getattr(logging, loglevel.upper()))
    job_inis = job_ini.split(',')
    assert len(job_inis) in (1, 2), job_inis
    monitor = performance.Monitor('total', measuremem=True)

    if len(job_inis) == 1:  # run hazard or risk
        oqparam = readinput.get_oqparam(job_inis[0], hc_id=hc)
        if hc and hc < 0:  # interpret negative calculation ids
            calc_ids = datastore.get_calc_ids()
            try:
                hc = calc_ids[hc]
            except IndexError:
                raise SystemExit('There are %d old calculations, cannot '
                                 'retrieve the %s' % (len(calc_ids), hc))
        calc = base.calculators(oqparam, monitor)
        monitor.monitor_dir = calc.datastore.calc_dir
        with monitor:
            calc.run(concurrent_tasks=concurrent_tasks, exports=exports,
                     hazard_calculation_id=hc)
    else:  # run hazard + risk
        calc = run2(
            job_inis[0], job_inis[1], concurrent_tasks, exports, monitor)

    logging.info('Total time spent: %s s', monitor.duration)
    logging.info('Memory allocated: %s', general.humansize(monitor.mem))
    monitor.flush()
    print('See the output with hdfview %s/output.hdf5' %
          calc.datastore.calc_dir)
    return calc
Code example #16
File: simulate.py  Project: synapticarbors/we_example
def run(NUM_BLOCKS, STEPS_PER_BLOCK, BLOCKS_PER_DUMP, sim_params):

    print('Setting up logging')
    logging.basicConfig(filename='sim.log', level=logging.DEBUG)
    logging.info('NUM_BLOCKS: {}'.format(NUM_BLOCKS))
    logging.info('STEPS_PER_BLOCK: {}'.format(STEPS_PER_BLOCK))
    logging.info('BLOCKS_PER_DUMP: {}'.format(BLOCKS_PER_DUMP))

    print('Instantiating sampler')
    h = sim_params['h']
    r0 = sim_params['r0']
    dr = sim_params['dr']
    outname = sim_params['outname']
    sampler = mcsampler.Sampler(h, r0, dr, np.random.randint(2**32-1))

    # Setup h5 file
    h5 = h5py.File(outname, 'w')
    h5coords = h5.create_dataset('coords', shape=(NUM_BLOCKS,), compression=9, scaleoffset=2,
            dtype=np.float32, chunks=(BLOCKS_PER_DUMP,))

    # Initial coords
    x = r0

    totblocks = NUM_BLOCKS // BLOCKS_PER_DUMP
    temp_coords = np.zeros((BLOCKS_PER_DUMP,), dtype=coord_dtype)

    print('Starting Simulation')
    for dki, dk in enumerate(xrange(totblocks)):
        t1 = time.time()
        sampler.step(x, temp_coords, BLOCKS_PER_DUMP*STEPS_PER_BLOCK, STEPS_PER_BLOCK)

        h5coords[dki*BLOCKS_PER_DUMP:(dki+1)*BLOCKS_PER_DUMP] = temp_coords[:]
        logging.info('Completed {} of {} steps: {} s'.format(dk,totblocks-1, time.time() - t1))

    h5.close()
Code example #17
File: train.py  Project: SigmaQuan/NMT-Coverage
def main():
    args = parse_args()

    state = getattr(experiments.nmt, args.proto)()
    if args.state:
        if args.state.endswith(".py"):
            state.update(eval(open(args.state).read()))
        else:
            with open(args.state) as src:
                state.update(cPickle.load(src))
    for change in args.changes:
        state.update(eval("dict({})".format(change)))

    logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
    logger.debug("State:\n{}".format(pprint.pformat(state)))

    rng = numpy.random.RandomState(state['seed'])
    enc_dec = RNNEncoderDecoder(state, rng, skip_init=args.skip_init, compute_alignment=True)
    enc_dec.build()
    lm_model = enc_dec.create_lm_model()

    logger.debug("Load data")
    train_data = get_batch_iterator(state)
    logger.debug("Compile trainer")
    algo = eval(state['algo'])(lm_model, state, train_data)
    logger.debug("Run training")
    main = MainLoop(train_data, None, None, lm_model, algo, state, None,
            reset=state['reset'],
            hooks=[RandomSamplePrinter(state, lm_model, train_data)]
                if state['hookFreq'] >= 0
                else None)
    if state['reload']:
        main.load()
    if state['loopIters'] > 0:
        main.main()
Code example #18
def main():
    """
    Application entry point
    """
    logging.basicConfig(level=logging.DEBUG)
    # create the application and the main window
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QMainWindow()
    
    # setup ui
    ui = example_ui.Ui_MainWindow()
    ui.setupUi(window)
    window.setWindowTitle("QDarkStyle example")

    # tabify dock widgets to show bug #6
    window.tabifyDockWidget(ui.dockWidget1, ui.dockWidget2)

    # setup stylesheet
    app.setStyleSheet(qdarkstyle.load_stylesheet(pyside=True))

    # auto quit after 2s when testing on travis-ci
    if "--travis" in sys.argv:
        QtCore.QTimer.singleShot(2000, app.exit)

    # run
    window.show()
    app.exec_()
Code example #19
File: daemon.py  Project: HanWenfang/dagobah
def init_logger(location, config):
    """ Initialize the logger with settings from config. """

    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

    if get_conf(config, 'Logging.enabled', False) == False:
        handler = NullHandler()
        logging.getLogger("dagobah").addHandler(handler)
        return

    if get_conf(config, 'Logging.logfile', 'default') == 'default':
        path = os.path.join(location, 'dagobah.log')
    else:
        path = config['Logging']['logfile']

    level_string = get_conf(config, 'Logging.loglevel', 'info').upper()
    numeric_level = getattr(logging, level_string, None)

    logging.basicConfig(filename=path, level=numeric_level)

    root = logging.getLogger()
    stdout_logger = logging.StreamHandler(sys.stdout)
    stdout_logger.setLevel(logging.INFO)
    root.addHandler(stdout_logger)

    print 'Logging output to %s' % path
    logging.info('Logger initialized at level %s' % level_string)
Code example #20
File: test262.py  Project: RexSong/test262
def Main():
  code = 0
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  ValidateOptions(options)
  test_suite = TestSuite(options.tests,
                         options.strict_only,
                         options.non_strict_only,
                         options.unmarked_default,
                         options.print_handle)
  test_suite.Validate()
  if options.loglevel == 'debug':
    logging.basicConfig(level=logging.DEBUG)
  elif options.loglevel == 'info':
    logging.basicConfig(level=logging.INFO)
  elif options.loglevel == 'warning':
    logging.basicConfig(level=logging.WARNING)
  elif options.loglevel == 'error':
    logging.basicConfig(level=logging.ERROR)
  elif options.loglevel == 'critical':
    logging.basicConfig(level=logging.CRITICAL)
  if options.cat:
    test_suite.Print(args)
  elif options.list_includes:
    test_suite.ListIncludes(args)
  else:
    code = test_suite.Run(options.command, args,
                          options.summary or options.full_summary,
                          options.full_summary,
                          options.logname,
                          options.junitname)
  return code
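Note: the if/elif chain above picks a logging constant from a lowercase level name. A more compact variant (a sketch, not the project's code) uses getattr on the logging module, the same pattern seen in examples #9, #15 and #29:

import logging

def configure_from_name(loglevel):
    # loglevel is assumed to be one of 'debug', 'info', 'warning', 'error', 'critical'
    level = getattr(logging, loglevel.upper(), None)
    if isinstance(level, int):
        logging.basicConfig(level=level)

configure_from_name('warning')   # hypothetical call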
Code example #21
File: make.py  Project: euphoris/another-springnote
def main():
    logging.basicConfig(level=logging.DEBUG)

    file_name = os.path.join(TARGET_DIR, 'stop_server.bat')
    if os.access(file_name, os.F_OK):
        logging.info('Trying to stop possibly running server...')
        subprocess.call(file_name, stderr=subprocess.PIPE, shell=True)

    if os.access(TARGET_DIR, os.F_OK):
        shutil.rmtree(TARGET_DIR)
    makedirs(TARGET_DIR, exist_ok=True)

    if IS_WINDOWS:
        deploy_wnmp()
        deploy_dokuwiki('nginx/www')
    else:
        deploy_dokuwiki()

    for pattern in [
            'example.nginx.conf',
            'readme.txt',]:
        for path in glob.glob(os.path.join(TARGET_DIR,
                os.path.normpath(pattern))):
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.unlink(path)
Code example #22
File: server.py  Project: devdattakulkarni/zuul
    def test_config(self, job_list_path):
        # See comment at top of file about zuul imports
        import zuul.scheduler
        import zuul.launcher.gearman
        import zuul.trigger.gerrit

        logging.basicConfig(level=logging.DEBUG)
        self.sched = zuul.scheduler.Scheduler()
        self.sched.registerReporter(None, 'gerrit')
        self.sched.registerReporter(None, 'smtp')
        self.sched.registerTrigger(None, 'gerrit')
        self.sched.registerTrigger(None, 'timer')
        layout = self.sched.testConfig(self.config.get('zuul',
                                                       'layout_config'))
        if not job_list_path:
            return False

        failure = False
        path = os.path.expanduser(job_list_path)
        if not os.path.exists(path):
            raise Exception("Unable to find job list: %s" % path)
        jobs = set()
        for line in open(path):
            v = line.strip()
            if v:
                jobs.add(v)
        for job in sorted(layout.jobs):
            if job not in jobs:
                print "Job %s not defined" % job
                failure = True
        return failure
Code example #23
File: logger.py  Project: snejy/Tasks
    def log(self, arguments, level = "info", format_with = '%(asctime)s %(message)s'):

        self.logger = logging.getLogger(self.logger_name)

        if not os.path.isfile(self.logfile):
            self.logfile = open(self.logfile, 'w').close()

        logging.basicConfig(filename = self.logfile, format = format_with)

        if type(arguments) == list:
            arguments = ("  ").join(arguments)

        if level == "warning":
            self.logger.setLevel(logging.WARNING)
            self.logger.warning(arguments)

        elif level == "error":
            self.logger.setLevel(logging.ERROR)
            self.logger.error(arguments)

        elif level == "exception":
            self.logger.setLevel(logging.CRITICAL)
            self.logger.exception(arguments)

        else:
            self.logger.setLevel(logging.INFO)
            self.logger.info(arguments)
Code example #24
File: TSACaller.py  Project: dvi31/PyDxS
    def __init__(self, wsUri, certificate, hashName, keystore=None, truststore=None, verbose=False):
        self.url = wsUri

        self.rt = rfc3161.RemoteTimestamper(self.url, certificate, hashname=hashName, keystore=keystore,
                                            truststore=truststore)
        if verbose:
            logging.basicConfig(level=logging.DEBUG)
Code example #25
File: visualize.py  Project: DavisBroda/kaggle-digits
def main():
    
    #print( 'Number of arguments: {0}'.format(len(sys.argv)) )
    #print( 'Argument List: {0}'.format(str(sys.argv)) )
    
    start = 1
    if len(sys.argv) > 1:
        start = int(sys.argv[1])
    
    end = start + 1
    if len(sys.argv) > 2:
        end = int(sys.argv[2])
    
    logging.getLogger('').handlers = []
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    log("Started mainline")
    
    trainingFileRaw = "data/train.csv"
    trainingFileNpy = "data/train.npy"   
    dataset = load(trainingFileRaw, trainingFileNpy)
    m, n = dataset.shape

    log("Full data set: rows: {0}, features: {1}".format(m,n))    

    predictions = execute(dataset, range(start, end))
    
    log("Completed mainline")
Code example #26
def configureBasicLogger(logDir,logName=""):
    # start logger:
    fileLogPath = "sim_" + strftime("%H-%M", gmtime()) + ".log" if len(logName) == 0 else logName
    fileLogPath = os.path.join(logDir, fileLogPath)
    if not os.path.exists(logDir):
        os.makedirs(logDir)
    #     flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
    #     os.open(fileLogPath, flags)
    #     os.close(fileLogPath)
    # set up logging to file - see previous section for more details
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s [%(processName)-12.12s] [%(levelname)-5.5s]  %(message)s",
                        datefmt='%m-%d %H:%M:%S',
                        filename=fileLogPath,
                        filemode='w')
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(asctime)s [%(processName)-12.12s] [%(levelname)-5.5s] %(message)s',
                                  datefmt='%m-%d %H:%M:%S')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger().addHandler(console)
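Note: a hypothetical usage of configureBasicLogger above (the excerpt assumes module-level imports of os, logging and time.strftime/gmtime). After the call, records at INFO and above go both to the timestamped file in logDir and to the console handler added at the end:

import logging

configureBasicLogger('logs')                       # writes e.g. logs/sim_13-45.log
logging.info('simulation started')                 # appears in the file and on the console
logging.getLogger('worker').warning('queue nearly full')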
Code example #27
File: run.py  Project: remram44/japong
def run(port, hostname, interface, verbosity):
    if verbosity == 0: # -q
        level = logging.CRITICAL
    elif verbosity == 1: # default
        level = logging.WARNING
    elif verbosity == 2: # -v
        level = logging.INFO
    else: # -v -v
        level = logging.DEBUG
    logging.basicConfig(level=level)

    if hostname:
        if not hostname.startswith('http://'):
            hostname = 'http://%s' % (hostname,)
        sep = hostname.find('/')
        if sep != -1:
            hostname = hostname[:sep]
        if port != 80 and not ':' in hostname:
            hostname = "http://%s:%d/" % (hostname, port)
        else:
            hostname = "http://%s/" % hostname
        logging.info('Hostname set to %s' % hostname)

    pong = PongGame()

    reactor.listenTCP(port, HttpFactory(hostname, pong), interface=interface)
    reactor.run()
Code example #28
File: maslahatbot.py  Project: muminoff/maslahatbot
def main():
    global LAST_UPDATE_ID
    telegram_token = os.environ.get("TELEGRAM_TOKEN")

    logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    logger = logging.getLogger("Maslahat.uz")
    logger.setLevel(logging.DEBUG)

    # logger.debug("Initalizing bot ...")
    try:
        bot = telegram.Bot(telegram_token)
        # logger.debug("Connected to Telegram API")
    except telegram.error.TelegramError:
        pass
        # logger.warning("Cannot connect to Telegram server!")

    redis_url = os.environ.get("REDIS_URL")
    redis_conn = redis.from_url(redis_url)
    # logger.debug("Connected to Redis")

    # logger.debug("Receiving updates ...")
    try:
        LAST_UPDATE_ID = bot.getUpdates()[-1].update_id
        # logger.debug("Updates received")
    except IndexError:
        # logger.warning("No update received")
        LAST_UPDATE_ID = None

    # logger.debug("Starting heartbeat ...")
    heart_beat(logger, stat)
    # logger.debug("Waiting for updates ...")
    while True:
        bot_worker(redis_conn, bot, logger)
        check_facebook(redis_conn, bot, logger)
        check_announcements(redis_conn, bot, logger)
Code example #29
File: conftest.py  Project: akakcolin/abipy
def pytest_report_header(config):
    """Write the initial header."""
    lines = ["\n*** Integration tests for abipy + abinit + pymatgen ***\n"]
    app = lines.append

    app("Assuming the enviroment is properly configured:")
    app("In particular, we assume that the abinit executable is in $PATH and can be executed.")
    app("Change manager.yml according to your platform.")
    app("Number of manager configurations: %d" % len(_manager_confs))

    if config.option.verbose > 0:
        for i, s in enumerate(_manager_confs):
            app(80 * "=")
            app("TaskManager #%d" % i)
            app(s)
            app(80 * "=")

    app("")

    # Initialize logging
    # loglevel is bound to the string value obtained from the command line argument.
    # Convert to upper case to allow the user to specify --loglevel=DEBUG or --loglevel=debug
    import logging
    numeric_level = getattr(logging, config.option.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % config.option.loglevel)
    logging.basicConfig(level=numeric_level)

    return lines
Code example #30
File: build_mozc.py  Project: faxinba/mozc
def main():
  logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
  logging.getLogger().addFilter(ColoredLoggingFilter())

  if len(sys.argv) < 2:
    ShowHelpAndExit()

  # Move to the Mozc root source directory only once since os.chdir
  # affects functions in os.path and that causes troublesome errors.
  os.chdir(MOZC_ROOT)

  command = sys.argv[1]
  args = sys.argv[2:]

  if command == 'gyp':
    (cmd_opts, cmd_args) = ParseGypOptions(args)
    GypMain(cmd_opts, cmd_args)
  elif command == 'build':
    (cmd_opts, cmd_args) = ParseBuildOptions(args)
    BuildMain(cmd_opts, cmd_args)
  elif command == 'runtests':
    (cmd_opts, cmd_args) = ParseRunTestsOptions(args)
    RunTestsMain(cmd_opts, cmd_args)
  elif command == 'clean':
    (cmd_opts, cmd_args) = ParseCleanOptions(args)
    CleanMain(cmd_opts, cmd_args)
  else:
    logging.error('Unknown command: %s', command)
    ShowHelpAndExit()
Code example #31
            reviewers.append(pr_info["merged_by"]["login"])
            pr_reviews = github.get(str(pr_info["url"]) + "/reviews")
            if not pr_reviews.ok:
                continue
            for review in pr_reviews.json():
                if review["state"].lower() == "approved":
                    reviewers.append(review["user"]["login"])

    return contributors, reviewers, merged_pr_count


if __name__ == "__main__":
    cmd_line_args = cmd_line_parser.parse_args()

    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%Y-%m-%d %T',
                        level=getattr(logging, cmd_line_args.loglevel))

    logging.info("Running circuitpython.org/libraries updater...")

    run_time = datetime.datetime.now()

    working_directory = os.path.abspath(os.getcwd())

    logging.info("Run Date: %s", run_time.strftime("%d %B %Y, %I:%M%p"))

    output_filename = ""
    local_file_output = False
    if cmd_line_args.output_file:
        output_filename = os.path.abspath(cmd_line_args.output_file)
        local_file_output = True
Code example #32
def main():
    parser = argparse.ArgumentParser(
        description='Initialize a server with content.')
    parser.add_argument('--server', '-s', required=True, help='server address')
    parser.add_argument('--datasources-folder',
                        '-df',
                        required=True,
                        help='folder containing datasources')
    parser.add_argument('--workbooks-folder',
                        '-wf',
                        required=True,
                        help='folder containing workbooks')
    parser.add_argument('--site-id',
                        '-sid',
                        required=False,
                        default='',
                        help='site id of the site to use')
    parser.add_argument('--project',
                        '-p',
                        required=False,
                        default='Default',
                        help='project to use')
    parser.add_argument('--username',
                        '-u',
                        required=True,
                        help='username to sign into server')
    parser.add_argument('--logging-level',
                        '-l',
                        choices=['debug', 'info', 'error'],
                        default='error',
                        help='desired logging level (set to error by default)')
    args = parser.parse_args()

    password = getpass.getpass("Password: ")

    # NOTE: the excerpt elided the sign-in setup here; the lines below are an
    # assumed reconstruction so that tableau_auth and server are defined.
    tableau_auth = TSC.TableauAuth(args.username, password)
    server = TSC.Server(args.server)

    with server.auth.sign_in(tableau_auth):
        print("Checking to see if we need to create the site...")

        all_sites = TSC.Pager(server.sites)
        existing_site = next(
            (s for s in all_sites if s.content_url == args.site_id), None)

        # Create the site if it doesn't exist
        if existing_site is None:
            print("Site not found: {0} Creating it...").format(args.site_id)
            new_site = TSC.SiteItem(
                name=args.site_id,
                content_url=args.site_id.replace(" ", ""),
                admin_mode=TSC.SiteItem.AdminMode.ContentAndUsers)
            server.sites.create(new_site)
        else:
            print("Site {0} exists. Moving on...").format(args.site_id)

    ################################################################################
    # Step 3: Sign-in to our target site
    ################################################################################
    print("Starting our content upload...")
    server_upload = TSC.Server(args.server)

    tableau_auth.site_id = args.site_id

    with server_upload.auth.sign_in(tableau_auth):

        ################################################################################
        # Step 4: Create the project we need only if it doesn't exist
        ################################################################################
        import time
        time.sleep(
            2)  # sad panda...something about eventually consistent model
        all_projects = TSC.Pager(server_upload.projects)
        project = next(
            (p
             for p in all_projects if p.name.lower() == args.project.lower()),
            None)

        # Create our project if it doesn't exist
        if project is None:
            print("Project not found: {0} Creating it...").format(args.project)
            new_project = TSC.ProjectItem(name=args.project)
            project = server_upload.projects.create(new_project)

        ################################################################################
        # Step 5:  Set up our content
        #     Publish datasources to our site and project
        #     Publish workbooks to our site and project
        ################################################################################
        publish_datasources_to_site(server_upload, project,
                                    args.datasources_folder)
        publish_workbooks_to_site(server_upload, project,
                                  args.workbooks_folder)
Code example #33
File: jobot.py  Project: alicetragedy/jobot
# -*- coding: utf-8 -*-
from telegram import (ReplyKeyboardMarkup)
from telegram.ext import Updater, CommandHandler
from telegram.ext import MessageHandler, Filters, RegexHandler, ConversationHandler
import logging
import csv
import config

# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)

logger = logging.getLogger(__name__)

LOCATION, EDUCATION, FIELD = range(3)

#variables 

userlocation = "Wien"
useredu = "Uni"
userfield = "Marketing"

# Methods handling commands

def start(bot, update):
  bot.sendMessage(update.message.chat_id,
    text='Hallo! Bist du auf Jobsuche? Ich kann weiterhelfen! In welcher Stadt Oesterreichs suchst du gerade?')
  return LOCATION

def location(bot, update):
  user = update.message.from_user
Code example #34
    run command
    """
    cmd = shlex.split(cmd_str)
    try:
        if stdout_devnull:  # for pg_ctl command
            with open(os.devnull, 'w') as devnull:
                res = subprocess.run(cmd, stdout=devnull)
        else:
            res = subprocess.run(cmd,
                                 check=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=stdin)
    except subprocess.CalledProcessError as e:
        logger.critical(traceback.format_exc())
        logger.info('Command: {} '.format(cmd_str))
        logger.info('Stdout: {}'.format(e.stdout.decode("utf8")))
        logger.info('Stderr: {}'.format(e.stderr.decode("utf8")))
        sys.exit(1)
    return res


if __name__ == "__main__":
    from logging import basicConfig, DEBUG

    basicConfig(level=DEBUG)
    result1 = run_command('ls -l /tmp')
    logger.debug(result1.stdout.decode("utf8"))
    result2 = run_command('ls -l 4312aaaa')
    logger.debug(result2.stdout.decode("utf8"))
Code example #35
File: request.py  Project: ignirtoq/simplemp
def setup_logging(verbosity):
    if verbosity is not None:
        level = ERROR - verbosity*10
        form = '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s'
        basicConfig(level=level, format=form)
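Note: here a verbosity count (e.g. from repeated -v flags) is turned into a level by stepping down from ERROR in units of 10, since the standard levels are spaced 10 apart. A short sketch with an assumed argparse wiring that is not part of the original module:

import argparse
from logging import ERROR, getLogger

# verbosity 0 -> ERROR (40), 1 -> WARNING (30), 2 -> INFO (20), 3 -> DEBUG (10)
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=None,
                    help='repeat to lower the log level (hypothetical flag)')
args = parser.parse_args()

setup_logging(args.verbose)        # uses the function defined above
getLogger(__name__).error('visible at any verbosity')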
Code example #36
File: download.py  Project: xiali1/decoding-carelink
if __name__ == '__main__':
  import doctest
  doctest.testmod( )

  import sys
  port = None
  port = sys.argv[1:] and sys.argv[1] or False
  serial_num = sys.argv[2:] and sys.argv[2] or False
  if not port or not serial_num:
    print "usage:\n%s <port> <serial>, eg /dev/ttyUSB0 208850" % sys.argv[0]
    sys.exit(1)
  import link
  import stick
  import session
  from pprint import pformat
  logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
  log.info("howdy! I'm going to take a look at your pump download something info.")
  stick = stick.Stick(link.Link(port, timeout=.400))
  stick.open( )
  session = session.Pump(stick, serial_num)
  log.info(pformat(stick.interface_stats( )))

  downloader = Downloader(stick, session)
  downloader.download( )

  downloader = PageDownloader(stick, session)
  downloader.download( )

  log.info(pformat(stick.interface_stats( )))
  log.info("howdy! we downloaded everything.")
Code example #37
import requests
from mcstatus import MinecraftServer

# parse args
parser = argparse.ArgumentParser(description="Get the count of minecraft players online and send a groupme message if the count changes")
parser.add_argument("groupme", type=str, help="the groupme bot ID / token")
parser.add_argument('server', type=str, help="the minecraft server address")
parser.add_argument('--debug', '-d', default=False, action='store_true', help="Enable debug logging. defaults to false")
args = vars(parser.parse_args())

GROUPME_BOT_ID      = args['groupme']
SERVER_URL          = args['server']
DEBUG               = args['debug']

# setup logger
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG if DEBUG else logging.WARN)
logger = logging.getLogger("minecraft_groupme_notifier")

server = MinecraftServer(SERVER_URL)
status = server.status()
count = status.players.online
logger.debug("{} players online".format(count))

# get file
p = Path("~", ".local", "minecraft_groupme").expanduser()
p.mkdir(parents=True, exist_ok=True)
# use the server name as the file name so we can run multiple instances of this
file = p / SERVER_URL


def write_count():
Code example #38
# Credit Reference: https://github.com/mitmproxy/mitmproxy/issues/3306
from mitmproxy import ctx, http, options, exceptions, proxy
from mitmproxy.tools.dump import DumpMaster

import logging
import os

# initiate logging
my_path = os.path.abspath(os.path.dirname(__file__))
alertlog = os.path.join(my_path, './logs/alert.log')
logging.basicConfig(filename=alertlog, format='%(asctime)s : %(message)s', level=logging.INFO)

# https://github.com/mitmproxy/mitmproxy/issues/237
# We don't want console to explode with flows, set max to 50 and code below clears flows
view = ctx.master.addons.get("view")
MAX_STORE_COUNT = 50

class Granville:

    def request(self, flow):
        
        # calls function below
        clearFlows()
        
        if "jenna" in flow.request.pretty_url:

            logging.info(str(flow.request.pretty_url))
            flow.response = http.HTTPResponse.make(200,b"stop, not so fast yall...",{"Content-Type": "text/html"})

    def clearFlows():
Code example #39
#!/usr/bin/env python3

import logging
import yaml
import os
import json
import sys
import hashlib
import urllib.parse
import traceback
from influxdb import InfluxDBClient
from http.server import HTTPServer, BaseHTTPRequestHandler

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)


class Webhook2Influxdb:

    def __init__(self):
        config_file = os.path.join("config", "webhooks.yml")
        with open(config_file) as file:
            logging.debug("Loading locations from %s" % config_file)
            self.config = yaml.load(file, Loader=yaml.FullLoader)

        self.write_counter = 0
        self.pagination = 1

        self.influx_clients = {}
        for client in self.config:
            self.setup_influx_client(client['host'], client['port'], client['user'], client['password'], client['db'],
                                     client['measurement'], client['tags'])
Code example #40
"""
Tests for `socketsender.app.cli` module.
"""
import logging
import os
import socket
import subprocess
import tempfile

logging.basicConfig()

FOO = None


def mktemp1(port=None):
    stream = f"""
---
- name: foo
  target_addr: "127.0.0.1"
  target_port: {port}
  frequency: 50
  length: 9
  source: file
  total: 10
  user_data1: "tests/data/a.txt"
- name: boo
  target_addr: "127.0.0.1"
  target_port: {port}
  frequency: 50
  length: 9
  source: file
Code example #41
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses(
        )

    if (os.path.exists(training_args.output_dir)
            and os.listdir(training_args.output_dir) and training_args.do_train
            and not training_args.overwrite_output_dir):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty."
            "Use --overwrite_output_dir to overcome.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO
        if is_main_process(training_args.local_rank) else logging.WARN,
    )

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        +
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name,
                                data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name,
                                            cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path,
                                            cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning(
            "You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast_tokenizer)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast_tokenizer)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForCausalLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    def tokenize_function(examples):
        return tokenizer(examples[text_column_name])

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not data_args.overwrite_cache,
    )

    if data_args.block_size is None:
        block_size = tokenizer.model_max_length
        if block_size > 1024:
            logger.warn(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead. You can change that default value by passing --block_size xxx."
            )
        block_size = 1024
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warn(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)

    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {
            k: sum(examples[k], [])
            for k in examples.keys()
        }
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
        # customize this part to your needs.
        total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k:
            [t[i:i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        result["labels"] = result["input_ids"].copy()
        return result

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
    lm_datasets = tokenized_datasets.map(
        group_texts,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=lm_datasets["train"] if training_args.do_train else None,
        eval_dataset=lm_datasets["validation"]
        if training_args.do_eval else None,
        tokenizer=tokenizer,
        # Data collator will default to DataCollatorWithPadding, so we change it.
        data_collator=default_data_collator,
    )

    # Training
    if training_args.do_train:
        model_path = (model_args.model_name_or_path if
                      (model_args.model_name_or_path is not None
                       and os.path.isdir(model_args.model_name_or_path)) else
                      None)
        train_result = trainer.train(model_path=model_path)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir,
                                         "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(
                os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir,
                                        "eval_results_clm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
Code example #42
File: main.py  Project: tudorfil9/ops-cli
def configure_logging(args):
    if args.verbose:
        if args.verbose > 1:
            logging.basicConfig(level=logging.DEBUG)
        else:
            logging.basicConfig(level=logging.INFO)
Code example #43
File: unet_training_dict.py  Project: w-j-p/MONAI
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image, mask paris
    tempdir = tempfile.mkdtemp()
    print(f"generating synthetic data to {tempdir} (this may take a while)")
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

    images = sorted(glob(os.path.join(tempdir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
    train_files = [{"img": img, "seg": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"img": img, "seg": seg} for img, seg in zip(images[-20:], segs[-20:])]

    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys=["img", "seg"]),
            RandCropByPosNegLabeld(
                keys=["img", "seg"], label_key="seg", size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["img", "seg"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["img", "seg"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadNiftid(keys=["img", "seg"]),
            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
            ScaleIntensityd(keys=["img", "seg"]),
            ToTensord(keys=["img", "seg"]),
        ]
    )

    # define dataset, data loader
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    check_loader = DataLoader(
        check_ds, batch_size=2, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available()
    )
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["seg"].shape)

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = DataLoader(
        train_ds,
        batch_size=2,
        shuffle=True,
        num_workers=4,
        collate_fn=list_data_collate,
        pin_memory=torch.cuda.is_available(),
    )
    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = DataLoader(
        val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate, pin_memory=torch.cuda.is_available()
    )

    # create UNet, DiceLoss and Adam optimizer
    device = torch.device("cuda:0")
    model = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss_function = monai.losses.DiceLoss(do_sigmoid=True)
    optimizer = torch.optim.Adam(model.parameters(), 1e-3)

    # start a typical PyTorch training
    val_interval = 2
    best_metric = -1
    best_metric_epoch = -1
    epoch_loss_values = list()
    metric_values = list()
    writer = SummaryWriter()
    for epoch in range(5):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{5}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["seg"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
            writer.add_scalar("train_loss", loss.item(), epoch_len * epoch + step)
        epoch_loss /= step
        epoch_loss_values.append(epoch_loss)
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")

        if (epoch + 1) % val_interval == 0:
            model.eval()
            with torch.no_grad():
                metric_sum = 0.0
                metric_count = 0
                val_images = None
                val_labels = None
                val_outputs = None
                for val_data in val_loader:
                    val_images, val_labels = val_data["img"].to(device), val_data["seg"].to(device)
                    roi_size = (96, 96, 96)
                    sw_batch_size = 4
                    val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
                    value = compute_meandice(
                        y_pred=val_outputs, y=val_labels, include_background=True, to_onehot_y=False, add_sigmoid=True
                    )
                    metric_count += len(value)
                    metric_sum += value.sum().item()
                metric = metric_sum / metric_count
                metric_values.append(metric)
                if metric > best_metric:
                    best_metric = metric
                    best_metric_epoch = epoch + 1
                    torch.save(model.state_dict(), "best_metric_model.pth")
                    print("saved new best metric model")
                print(
                    "current epoch: {} current mean dice: {:.4f} best mean dice: {:.4f} at epoch {}".format(
                        epoch + 1, metric, best_metric, best_metric_epoch
                    )
                )
                writer.add_scalar("val_mean_dice", metric, epoch + 1)
                # plot the last model output as GIF image in TensorBoard with the corresponding image and label
                plot_2d_or_3d_image(val_images, epoch + 1, writer, index=0, tag="image")
                plot_2d_or_3d_image(val_labels, epoch + 1, writer, index=0, tag="label")
                plot_2d_or_3d_image(val_outputs, epoch + 1, writer, index=0, tag="output")
    shutil.rmtree(tempdir)
    print(f"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}")
    writer.close()
Code example #44
File: spidey.py  Project: alam0/unixxx
    except getopt.GetoptError as e:
        usage(1)

    for option, value in options:
        if option == '-p':
            PORT = int(value)
        elif option == '-f':
            FORKING = True
        elif option == '-d':
            DOCROOT = value
        elif option == '-v':
            LOGLEVEL = logging.DEBUG
        else:
            usage(1)

    # Set logging level
    logging.basicConfig(
        level   = LOGLEVEL,
        format  = '[%(asctime)s] %(message)s',
        datefmt = '%Y-%m-%d %H:%M:%S',
    )

    # Instantiate and run server
    server = TCPServer(port=PORT, forking=FORKING)

    try:
        server.run()
    except KeyboardInterrupt:
        sys.exit(0)
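
The option loop above calls a usage() helper that is not part of this excerpt; a hypothetical sketch based only on the flags handled above (the real spidey.py text may differ):

import sys

def usage(status=0):
    # Hypothetical help text, derived from the -p/-f/-d/-v options shown above.
    print("usage: spidey.py [-p PORT] [-f] [-d DOCROOT] [-v]", file=sys.stderr)
    sys.exit(status)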

Code example #45
import logging
logging.basicConfig(level=logging.INFO)

import time
import numpy as np
import cv2
import pyrealsense as pyrs
from pyrealsense.constants import rs_option

depth_fps = 90
depth_stream = pyrs.stream.DepthStream(fps=depth_fps)


def convert_z16_to_bgr(frame):
    '''Performs depth histogram normalization
    This raw Python implementation is slow. See here for a fast implementation using Cython:
    https://github.com/pupil-labs/pupil/blob/master/pupil_src/shared_modules/cython_methods/methods.pyx
    '''
    hist = np.histogram(frame, bins=0x10000)[0]
    hist = np.cumsum(hist)
    hist -= hist[0]
    rgb_frame = np.empty(frame.shape[:2] + (3,), dtype=np.uint8)

    zeros = frame == 0
    non_zeros = frame != 0

    print(frame.shape)
    print("\n\n")

    f = hist[frame[non_zeros]] * 255 / hist[0xFFFF]
    rgb_frame[non_zeros, 0] = 255 - f
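
The function above is cut off by the excerpt; a self-contained sketch of the full colorization pattern its docstring describes (the per-channel constants are assumptions, not necessarily the project's exact values):

import numpy as np

def convert_z16_to_bgr_sketch(frame):
    """Depth histogram normalization to a BGR image (hedged sketch)."""
    # one bin per 16-bit depth value so raw depths can index the CDF directly
    hist = np.cumsum(np.histogram(frame, bins=0x10000, range=(0, 0x10000))[0])
    hist -= hist[0]
    bgr = np.zeros(frame.shape[:2] + (3,), dtype=np.uint8)
    non_zeros = frame != 0
    if hist[0xFFFF] > 0:
        f = hist[frame[non_zeros]] * 255 / hist[0xFFFF]
        bgr[non_zeros, 0] = 255 - f   # near depths: strong blue
        bgr[non_zeros, 2] = f         # far depths: strong red
    return bgr

# depth = np.random.randint(0, 5000, size=(48, 64), dtype=np.uint16)
# print(convert_z16_to_bgr_sketch(depth).shape)   # (48, 64, 3)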
Code example #46
File: faber.py  Project: davidkim97/banggooseok-chain
)
from runners.support.utils import (  # noqa:E402
    log_msg,
    log_status,
    log_timer,
    prompt,
    prompt_loop,
    require_indy,
)


CRED_PREVIEW_TYPE = "https://didcomm.org/issue-credential/2.0/credential-preview"
SELF_ATTESTED = os.getenv("SELF_ATTESTED")
TAILS_FILE_COUNT = int(os.getenv("TAILS_FILE_COUNT", 100))

logging.basicConfig(level=logging.WARNING)
LOGGER = logging.getLogger(__name__)


class FaberAgent(AriesAgent):
    def __init__(
        self,
        ident: str,
        http_port: int,
        admin_port: int,
        no_auto: bool = False,
        **kwargs,
    ):
        super().__init__(
            ident,
            http_port,
Code example #47
import os
import numpy as np
import tensorflow as tf
from crnn_model import crnn_model
from global_configuration import config
import define_input_fn
import hparams
from data_prepare import char_dict, load_tf_data
import cv2

# from log_utils import log_util
#
# logger = log_util.init_logger()
import logging

logging.basicConfig(level=logging.DEBUG)


def crnn_net(is_training, feature, label, batch_size, l_size):
    seq_len = l_size
    if is_training:
        shadownet = crnn_model.ShadowNet(
            phase='Train',
            hidden_nums=256,
            layers_nums=2,
            seq_length=seq_len,
            num_classes=config.cfg.TRAIN.CLASSES_NUMS,
            rnn_cell_type='lstm')

        imgs = tf.image.resize_images(feature, (32, l_size * 4), method=0)
        input_imgs = tf.cast(x=imgs, dtype=tf.float32)
Code example #48
#!/usr/bin/env python3
"""An example that prints all Crownstone IDs seen on the mesh."""
import logging
import time

logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
# Create new instance of Bluenet
from crownstone_uart import CrownstoneUart

uart = CrownstoneUart()

# Start up the USB bridge.
uart.initialize_usb_sync()
# you can alternatively do this async by
# await uart.initialize_usb()

# List the ids that have been seen
print("Listening for Crownstones on the mesh, this might take a while.")

# the try except part is just to catch a control+c, time.sleep does not appreciate being killed.
initial_time = time.time()
try:
    while uart.running:
        time.sleep(2)
        ids = uart.get_crownstone_ids()
        print("Crownstone IDs seen so far:", ids, "after",
              round(time.time() - initial_time), "seconds")
except KeyboardInterrupt:
    print("\nClosing example.... Thank you for your time!")
except:
    print("\nClosing example.... Thank you for your time!")
Code example #49
File: __init__.py  Project: NickxFury/TorLeechGram
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Priiiyo [priiiyo@github]
__version__ = "0.2.4"
__author__ = "Priiiyo Github@priiiyo"

import logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(threadName)s %(name)s %(message)s",
    handlers=[logging.StreamHandler(),
              logging.FileHandler("torlog.txt")])

from torleechgram.core.wserver import start_server
from .core.database_handle import TlgUpload, TorLeechGramDB, TlgTorrents, UserDB
from .core.varholdern import VarHolder
import time

logging.info("Database created")
upload_db = TlgUpload()
var_db = TorLeechGramDB()
tor_db = TlgTorrents()
user_db = UserDB()

uptime = time.time()
to_del = []
SessionVars = VarHolder(var_db)
Code example #50
File: __init__.py  Project: leadermin/dp-tornado
    def run(self, **kwargs):
        custom_scheduler = kwargs[
            'scheduler'] if 'scheduler' in kwargs else None
        custom_service = kwargs['service'] if 'service' in kwargs else None
        custom_config_file = kwargs[
            'config_file'] if 'config_file' in kwargs else None

        engine_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
        application_path = kwargs[
            'application_path'] if 'application_path' in kwargs else None
        combined_path = os.path.join(application_path, 'static', 'combined')

        if 'initialize' in kwargs and kwargs['initialize']:
            import shutil

            template_path = os.path.join(engine_path, 'engine', 'template')

            for root, dirs, files in os.walk(template_path):
                path = root[len(template_path) + 1:]
                app_path = os.path.join(application_path, path)

                if path and os.path.isdir(app_path):
                    continue

                if not os.path.isdir(app_path):
                    os.mkdir(app_path)

                for file in files:
                    src = os.path.join(root, file)
                    dest = os.path.join(app_path, file)

                    if not os.path.isfile(dest):
                        shutil.copy(src, dest)

        # INI
        config = configparser.RawConfigParser()
        config.read(
            os.path.join(application_path, custom_config_file or 'config.ini'))

        def get_cfg(c, option, section='server', default=None):
            try:
                get = c.get(section, option)

                if default is True or default is False:
                    return True if get == '1' else False

                elif isinstance(default, str):
                    return str(get)

                elif isinstance(default, int):
                    return int(get)

                else:
                    return get
            except (configparser.NoSectionError, configparser.NoOptionError):
                return default

        # Setup Options
        tornado.options.define('max_worker',
                               default=get_cfg(config, 'max_worker',
                                               default=1))
        tornado.options.define('num_processes',
                               default=get_cfg(config,
                                               'num_processes',
                                               default=0))
        tornado.options.define('port',
                               default=get_cfg(config, 'port', default=8080))
        tornado.options.define('debug',
                               default=get_cfg(config, 'debug', default=False))
        tornado.options.define('gzip',
                               default=get_cfg(config, 'gzip', default=True))
        tornado.options.define('crypto_key',
                               default=get_cfg(config,
                                               'key',
                                               section='crypto',
                                               default='CR$t0-$CR@T'))
        tornado.options.define('session_dsn',
                               default=get_cfg(config,
                                               'dsn',
                                               section='session',
                                               default=None))
        tornado.options.define('session_exp_in',
                               default=get_cfg(config,
                                               'expire_in',
                                               section='session',
                                               default=7200))
        tornado.options.define('max_body_size',
                               default=get_cfg(config,
                                               'max_body_size',
                                               default=1024 * 1024 * 10))
        tornado.options.define('application_path', application_path)
        tornado.options.define('python', sys.executable)

        # Static AWS
        tornado.options.define('static_aws_id',
                               default=get_cfg(config,
                                               'aws_id',
                                               section='static'))
        tornado.options.define('static_aws_secret',
                               default=get_cfg(config,
                                               'aws_secret',
                                               section='static'))
        tornado.options.define('static_aws_bucket',
                               default=get_cfg(config,
                                               'aws_bucket',
                                               section='static'))
        tornado.options.define('static_aws_endpoint',
                               default=get_cfg(config,
                                               'aws_endpoint',
                                               section='static'))

        access_logging = get_cfg(config,
                                 'access',
                                 default=1,
                                 section='logging')
        sql_logging = get_cfg(config, 'sql', default=0, section='logging')

        # Initialize Logging
        logging.basicConfig(
            level=logging.DEBUG if access_logging else logging.WARN,
            format='[%(asctime)s][%(levelname)s] %(message)s')

        # SQLAlchemy logging level
        if sql_logging:
            logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

        logging.info('---------------------------------')
        logging.info('dp for Tornado has been started..')
        logging.info('---------------------------------')

        services_raw = [
            (r"/", None),
            (r"/(.*)", None),
        ]

        if custom_service:
            for e in custom_service:
                services_raw.append(e)

        services = []
        default_handler = None

        for service in services_raw:
            if len(service) < 2:
                raise Exception('The specified service is invalid.')

            if service[1] is not None:
                s = str.split(service[1], '.')
                class_name = s.pop()
                module_path = '.'.join(s)

                handler_module = importlib.import_module(module_path)
                handler = getattr(handler_module, class_name)

            else:
                if default_handler is None:
                    handler_module = importlib.import_module(
                        'dp_tornado.engine.default_handler')
                    default_handler = getattr(handler_module, 'DefaultHandler')

                module_path = 'controller'
                handler = default_handler

            services.append((service[0], handler, dict(prefix=module_path)))

        # Clear combined files
        Compressor.clear(combined_path)

        compressor_path = os.path.join(engine_path, 'engine', 'plugin',
                                       'compressor')

        settings = {
            'template_path':
            os.path.join(application_path, 'view'),
            'static_path':
            os.path.join(application_path, 'static'),
            'static_url_prefix':
            '/s/',
            'combined_static_path':
            combined_path,
            'combined_static_url_prefix':
            '/s/combined/',
            'compressors': {
                'minifier': None
            },
            'debug':
            tornado.options.options.debug,
            'gzip':
            tornado.options.options.gzip,
            'cookie_secret':
            get_cfg(config, 'cookie_secret', default='default_cookie_secret'),
            'ui_modules': {}
        }

        num_processed = (tornado.options.options.num_processes
                         if tornado.options.options.num_processes else
                         multiprocessing.cpu_count())

        logging.info('Server Mode : %s' %
                     ('Production'
                      if not tornado.options.options.debug else 'Debugging'))
        logging.info('Server time : %s' % time.strftime('%Y.%m.%d %H:%M:%S'))
        logging.info('Server Port : %s' % tornado.options.options.port)
        logging.info('Processors  : %s' % num_processed)
        logging.info('CPU Count   : %d' % multiprocessing.cpu_count())
        logging.info('---------------------------------')

        if custom_scheduler:
            scheduler = Scheduler(custom_scheduler)
            scheduler.start()

        else:
            scheduler = None

        application = RestfulApplication(services, settings)
        service = tornado.httpserver.HTTPServer(
            application,
            xheaders=True,
            max_body_size=tornado.options.options.max_body_size)
        service.bind(tornado.options.options.port, '')
        service.start(tornado.options.options.num_processes)

        import random
        application.identifier = random.randint(100000, 999999)

        try:
            instance = tornado.ioloop.IOLoop.instance()
            instance.__setattr__('startup_at',
                                 getattr(application, 'startup_at'))
            instance.start()

        except KeyboardInterrupt:
            if scheduler:
                scheduler.interrupted = True
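
run() pulls its settings from config.ini through get_cfg; a sketch that writes a minimal file covering the sections referenced above (every value is an illustrative assumption, not a dp-tornado default):

import configparser

sample = configparser.RawConfigParser()
sample['server'] = {
    'max_worker': '1',
    'num_processes': '0',
    'port': '8080',
    'debug': '1',
    'gzip': '1',
    'max_body_size': str(1024 * 1024 * 10),
    'cookie_secret': 'change-me',
}
sample['crypto'] = {'key': 'change-me-too'}
sample['session'] = {'dsn': '', 'expire_in': '7200'}
sample['static'] = {'aws_id': '', 'aws_secret': '', 'aws_bucket': '', 'aws_endpoint': ''}
sample['logging'] = {'access': '1', 'sql': '0'}

with open('config.ini', 'w') as fh:
    sample.write(fh)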
Code example #51
File: display_progress.py  Project: TMAdminz/RNAME
import logging
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

import math
import os
import time

# the secret configuration specific things
if bool(os.environ.get("WEBHOOK", False)):
    from sample_config import Config
else:
    from config import Config

# the Strings used for this "thing"
from translation import Translation


async def progress_for_pyrogram(
    current,
    total,
    ud_type,
    message,
    start
):
    now = time.time()
    diff = now - start
    if round(diff % 5.00) == 0 or current == total:
        # if round(current / total * 100, 0) % 5 == 0:
        percentage = current * 100 / total
Code example #52
def pull_data(data_folder: str, data_prod: str, force_exec: bool = False):
    """Función para tomar los datos desde el repo GitHub y dejarlos en la
    ubicación de producción."""
    import os
    import pdb
    import sys
    import shutil
    import logging
    import numpy as np
    import pandas as pd
    from datetime import datetime
    from src import helpers
    # pdb.set_trace()
    helpers.print_ts(code=1,
                     text="Proceso de configuración de la data, comenzando.")
    # Setting up execution variables
    base_path = os.getenv("BASE_APP_PATH")
    # Configuring the root logging instance
    log_fmt = "%(asctime)s - %(process)d - %(levelname)s - %(name)s::%(message)s"
    date_fmt = "%Y-%m-%d %H:%M:%S"
    log_loc = os.path.join(base_path, "logs", "setup.log")
    json_file = "data_stat.json"
    # Configuring logging
    logging.basicConfig(level=logging.DEBUG,
                        filename=log_loc,
                        filemode="a",
                        format=log_fmt,
                        datefmt=date_fmt)
    logging.info(":::Comenzando proceso de configuración de la data:::")
    logging.info("Configurando el entorno...")
    # Defining environment variables
    git_file = os.path.join(base_path, data_folder)
    csv_fold = os.path.join(base_path, data_prod)
    csv_file = os.path.join(base_path, data_prod, "Covid-19_std.csv")
    data_src = os.path.join(git_file, "output", "producto1",
                            "Covid-19_std.csv")
    data_fnl = os.path.join(base_path, data_prod, "data.csv")
    previous_folder = False
    perform_load = True
    # Checking that the data file to work with exists
    try:
        logging.info(
            "Revisando que exista el archivo con los datos a utilizar...")
        helpers.check_file(data_src)
        logging.info("El archivo existe.")
    except:
        logging.error("El archivo de datos no existe.")
        logging.exception("Detalle del error:")
        helpers.print_ts(code=3, text="Los datos aún no son descargados.")
        logging.error("Saliendo del programa, con estado 5.")
        # Writing the status JSON:
        current = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
        status_dict = dict(status="error",
                           ts=current,
                           exit_code=5,
                           desc_stat="No available data",
                           last_date=None)
        helpers.print_last_update(data_dict=status_dict, json_f=json_file)
        raise Exception(
            "El archivo de datos no existe. Saliendo con excepcion 5...")
    # Checking that the target folder exists
    try:
        logging.info(
            "Revisando que exista la carpeta de destino para la data...")
        helpers.check_file(csv_fold)
        logging.info("La carpeta existe.")
        previous_folder = True
    except:
        logging.warning("La carpeta no existe.")
        logging.exception("Detalle del error:")
        logging.info("Creando carpeta de datos...")
        os.mkdir(csv_fold)
        logging.info("Carpeta de datos creada.")
    # Reading the file into a Pandas dataframe
    logging.info("Leyendo datos recién cargados...")
    df_tmp = pd.read_csv(data_src)
    last_dt = max(df_tmp.Fecha)
    try:
        logging.info("Leyendo datos de la última corrida exitosa...")
        df_tmp_2 = pd.read_csv(data_fnl)
        logging.info("Datos leidos.")
        previous_folder = True
        if force_exec:
            pass
        else:
            perform_load = False
    except:
        df_tmp_2 = None
        logging.error("No hay datos previos.")
        logging.exception("Detalles del error:")
        logging.info("Se ejecutará el proceso de configuracion setup_data.py")
    # If the latest available date is newer than the last saved date, copy it to the new location
    if perform_load:
        pass
    else:
        logging.info(
            "Obteniendo fechas a comparar: última carga, últimos guardados, último registro..."
        )
        try:
            saved_dt = max(df_tmp_2.fecha)
            saved_dt_json = helpers.pull_last_update(json_f=json_file,
                                                     field="last_date")[0]
            logging.info(
                "La última ejecución obtuvo datos del día {0}".format(last_dt))
            logging.info(
                "Los últimos datos guardados son del día {0}".format(saved_dt))
            logging.info(
                "La última ejecución tiene registro de la fecha {0}".format(
                    saved_dt_json))
            del df_tmp, df_tmp_2
        except:
            logging.error("Uno o más de los archivos fuente no existe.")
            logging.exception("Detalles del error:")
            logging.info(
                "Se ejecutará el proceso de configuración setup_data.py")
            perform_load = True
        try:
            logging.info(
                "Revisando que las fechas coincidan para la ultima ejecucion guardada..."
            )
            if saved_dt == saved_dt_json:
                logging.info("Las fechas coinciden.")
            else:
                raise Exception(
                    "Las fechas guardadas del ultimo proceso de carga no coinciden."
                )
        except:
            logging.error(
                "Las fechas almacenadas de la ultima ejecucion del proceso no coinciden."
            )
            logging.exception("Detalle del error:")
            logging.info(
                "Se ejecutará el proceso de configuracion setup_data.py")
            perform_load = True
        # Review the processed dates to decide whether the process must run or the previous data is kept
        try:
            if perform_load:
                pass
            else:
                logging.info("Revisando la última fecha procesada...")
                if saved_dt_json == None:
                    logging.info(
                        "La última ejecución fue errónea o no existe.")
                    raise Exception(
                        "La ultima ejecucion fue erronea o no existe.")
                else:
                    if last_dt <= saved_dt and previous_folder:
                        logging.warning(
                            "No se ejecutará el proceso, se trabajará con los datos existentes."
                        )
                        helpers.print_ts(
                            code=2,
                            text=
                            "No se ejecutará el proceso, se trabajará con los datos existentes."
                        )
                        return None
                    else:
                        logging.info("Se ejecutará el proceso setup_data.py")
                        perform_load = True
        except:
            logging.warning("Se ejecutará el proceso setup_data.py")
            logging.exception("Detalles del error:")
    try:
        if not perform_load and not force_exec:
            pass
        else:
            logging.info(
                "Copiando archivo origen de datos a ubicación final...")
            shutil.copyfile(data_src, csv_file)
            logging.info("Archivo copiado.")
    except:
        logging.error(
            "Error al copiar el archivo fuente a su ubicación final.")
        logging.exception("Detalle del error:")
        helpers.print_ts(
            code=3,
            text="Error al copiar el archivo fuente a su ubicación final.")
        logging.error("Saliendo del programa, con estado 15.")
        # Writing the status JSON:
        current = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
        status_dict = dict(status="error",
                           ts=current,
                           exit_code=15,
                           last_date=None,
                           desc_stat="Copy error")
        helpers.print_last_update(data_dict=status_dict, json_f=json_file)
        raise Exception(
            "Error al copiar el archivo de datos. Saliendo con excepcion 15..."
        )
    # Transforming the data in Pandas for easier reading
    logging.info("Lectura de datos finales en Pandas...")
    df_final = pd.read_csv(csv_file)
    dict_cols = {
        "Region": "region",
        "Codigo region": "cod_region",
        "Comuna": "comuna",
        "Codigo comuna": "cod_comuna",
        "Poblacion": "poblacion",
        "Fecha": "fecha",
        "Casos confirmados": "casos"
    }
    logging.info("Cambiando nombre de columnas...")
    df_final.rename(columns=dict_cols, inplace=True)
    logging.info("Nombres de columnas cambiados.")
    logging.info("Cambiando columna 'fecha' a formato fecha...")
    try:
        logging.info(
            "Guardando los datos finales en la ubicación de producción...")
        df_final.to_csv(data_fnl, index=False, encoding="utf-8")
        logging.info("Datos guardados.")
    except:
        logging.error("Error al guardar el archivo de datos final.")
        logging.exception("Detalle del error:")
        helpers.print_ts(code=3,
                         text="Error al guardar el archivo de datos final.")
        logging.error("Saliendo del programa, con estado 22.")
        # Writing the status JSON:
        current = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
        status_dict = dict(status="error",
                           ts=current,
                           exit_code=22,
                           last_date=None,
                           desc_stat="Save error")
        helpers.print_last_update(data_dict=status_dict, json_f=json_file)
        raise Exception(
            "El archivo de datos no se pudo guardar. Saliendo con excepcion 22..."
        )
    # End of the process
    logging.info(
        ">>>Proceso de obtención de la data, completado exitosamente.")
    helpers.print_ts(
        code=1,
        text="Proceso de obtención de la data, completado exitosamente.")
    # Writing the status JSON:
    current = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
    status_dict = dict(status="copied",
                       ts=current,
                       last_date=last_dt,
                       exit_code=0)
    helpers.print_last_update(data_dict=status_dict, json_f=json_file)
    logging.shutdown()
    return None
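
pull_data leans on a project-specific src.helpers module that is not shown here; a hedged sketch of helpers consistent with the call sites above (all bodies are assumptions):

import json
import os
from datetime import datetime

def print_ts(code=1, text=""):
    """Print a timestamped status line; code 1=info, 2=warning, 3=error (assumed convention)."""
    prefix = {1: "INFO", 2: "WARN", 3: "ERROR"}.get(code, "INFO")
    print(f"[{datetime.now():%Y-%m-%d %H:%M:%S}] {prefix}: {text}")

def check_file(path):
    """Raise if the path does not exist; callers wrap this in try/except."""
    if not os.path.exists(path):
        raise FileNotFoundError(path)

def print_last_update(data_dict, json_f):
    """Persist the run status (status, ts, exit_code, last_date, ...) as JSON."""
    with open(json_f, "w", encoding="utf-8") as fh:
        json.dump(data_dict, fh, ensure_ascii=False, indent=2)

def pull_last_update(json_f, field):
    """Return the requested field from the saved status JSON as a one-item list."""
    with open(json_f, encoding="utf-8") as fh:
        return [json.load(fh).get(field)]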
Code example #53
import asyncio
import logging
import unittest

import yaml

import netdev

logging.basicConfig(filename='tests/unittest.log', level=logging.DEBUG)
config_path = 'config.yaml'


class TestRouterOS(unittest.TestCase):
    @staticmethod
    def load_credits():
        with open(config_path, 'r') as conf:
            config = yaml.safe_load(conf)  # safe_load: plain data only, no arbitrary objects
            with open(config['device_list'], 'r') as devs:
                devices = yaml.safe_load(devs)
                params = [p for p in devices if p['device_type'] == 'mikrotik_routeros']
                return params

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.loop.set_debug(False)
        asyncio.set_event_loop(self.loop)
        self.devices = self.load_credits()
        self.assertFalse(len(self.devices) == 0)

    def test_show_system_identity(self):
        async def task():
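
The test body is truncated here; a hedged sketch of how such a netdev-based test typically finishes (the command string and assertion are assumptions):

import netdev

async def task(devices):
    for dev in devices:
        async with netdev.create(**dev) as ros:
            out = await ros.send_command('/system identity print')
            assert 'name' in out   # assumption about the expected RouterOS output

# inside the TestCase method:
#     self.loop.run_until_complete(task(self.devices))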
Code example #54
        Image.init()
        id = sorted(Image.ID)
        print("Supported formats:")
        for i in id:
            print(i, end=' ')
        sys.exit(1)
    elif o == "-i":
        verbose = 1
    elif o == "-q":
        quiet = 1
    elif o == "-v":
        verify = 1
    elif o == "-D":
        logging_level = "DEBUG"

logging.basicConfig(level=logging_level)


def globfix(files):
    # expand wildcards where necessary
    if sys.platform == "win32":
        out = []
        for file in files:
            if glob.has_magic(file):
                out.extend(glob.glob(file))
            else:
                out.append(file)
        return out
    return files

for file in globfix(args):
Code example #55
File: job.py  Project: russellPanda/devops-2
# Set the default Django settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'devops.settings')
# Let django initialize
import django
django.setup()
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.gevent import GeventScheduler
# from django_apscheduler.jobstores import DjangoJobStore, register_events, register_job
from webssh.views import cls_terminalsession
from django.core.cache import cache
import datetime
import time
import fcntl
import logging
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def myfunc():
    with open('/tmp/ap.txt', 'a+') as f:
        f.write('ap\n')


# scheduler = BackgroundScheduler()
# scheduler = GeventScheduler()     # GeventScheduler does not work properly here
# scheduler.add_jobstore(DjangoJobStore(), "default")
# scheduler.start()
# register_events(scheduler)
# if scheduler.get_job('my_job_id'):
#     print('my_job_id exists')
Code example #56
        type=str,
        help=(
            "key to search for. if you're dealing with JSON data use the"
            " jq syntax for searching"
        )
    )
    body_parser.add_argument(
        '--value',
        required=True,
        type=str,
        help='value to match the key against'
    )
    body_parser.set_defaults(
        scan_spooler=scan_body
    )

    parser.add_argument(
        'spooler_dir',
        type=str,
        help='Spooler directory to scan'
    )
    return parser


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.WARNING,
        format='[%(asctime)s] - [%(levelname)s]: %(message)s'
    )
    main()
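
main() itself is outside this excerpt; given the set_defaults(scan_spooler=...) calls above, it presumably parses the arguments and dispatches through that attribute. A self-contained sketch of the pattern (names and argument layout are illustrative, not the original script's):

import argparse

def scan_body(args):
    print(f"scanning {args.spooler_dir} for {args.key}={args.value}")

def build_parser():
    parser = argparse.ArgumentParser()
    sub = parser.add_subparsers(dest='cmd', required=True)
    body = sub.add_parser('body')
    body.add_argument('--key', required=True)
    body.add_argument('--value', required=True)
    body.add_argument('spooler_dir')
    body.set_defaults(scan_spooler=scan_body)
    return parser

def main():
    args = build_parser().parse_args()
    args.scan_spooler(args)   # dispatch to the handler chosen via set_defaults

if __name__ == '__main__':
    main()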
Code example #57
File: __main__.py  Project: brfrs/Murmur
#!/usr/bin/env python3

import tkinter as tk
from main_app import App
import logging

if __name__ == '__main__':
	logging.basicConfig(filename='console.log', level=logging.INFO)
	
	logging.info("\n==Application starting==\n")
	root = tk.Tk()
	
	App(root)

	root.mainloop()
Code example #58
File: __init__.py  Project: Thousandhack/Flask_home
# Get the redis database connection
redis_store = None

# Add CSRF protection to flask
csrf = CSRFProtect()

#
# logging.error("11")  # 错误级别
# logging.warn("222")   # 警告级别
# logging.info()  # 消息提示级别
# logging.debug()  # 调试级别

# Set the logging level
# The configured level only controls what gets written when the project is not running in DEBUG mode
logging.basicConfig(level=logging.DEBUG)  # debug level

# Create a log handler, specifying the log file path, the maximum size of each log file and the maximum number of files to keep
file_log_handler = RotatingFileHandler("logs/home.log",
                                       maxBytes=1024 * 1024 * 100,
                                       backupCount=10)

# Create the log record format
formatter = logging.Formatter(
    '%(levelname)s %(filename)s:%(lineno)d %(message)s')
# Apply the log format to the handler just created
file_log_handler.setFormatter(formatter)
# Add the handler to the global logging object (the one used by the flask app)
logging.getLogger().addHandler(file_log_handler)

Code example #59
if __name__ == "__main__":
    # I/O interface, read from an input file
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--input',
                        help='input file path of video ids',
                        required=True)
    parser.add_argument('-o',
                        '--output',
                        help='output file path of video data',
                        required=True)
    args = parser.parse_args()

    input_path = args.input
    output_path = args.output
    logging.basicConfig(filename='./youtube_crawler.log',
                        level=logging.WARNING)

    developer_key = DEVELOPER_KEY
    if developer_key == 'YOUR_DEVELOPER_KEY':
        logging.error('>>> You need to set your own developer key!')
        logging.error('>>> Exiting...')
        sys.exit(1)

    yt_crawler = discovery.build(YOUTUBE_API_SERVICE_NAME,
                                 YOUTUBE_API_VERSION,
                                 developerKey=developer_key)
    opener = urllib2.build_opener()
    cookie, sessiontoken = get_cookie_and_sessiontoken()
    postdata = get_post_data(sessiontoken)

    output_data = open(output_path, 'a+')

def setUpModule():
    logging.basicConfig(
        filename="/tmp/oci_ansible_module.log", filemode="a", level=logging.INFO
    )
    oci_load_balancer_protocol_facts.set_logger(logging)