# NOTE(review): this chunk is the tail of a poll-switch entry function whose
# "def" line is above this view; "poll_switches" (presumably a dict of
# switch ip -> credentials) and "user" come from that enclosing scope —
# confirm against the full file.
    try:
        # Fan per-switch polling out over a process pool so one slow or
        # unreachable switch does not serialize the entire run.
        pool = Pool(processes=flags.OPTIONS.thread_pool_size)
        for switch_ip, switch_credentials in poll_switches.items():
            pool.apply_async(
                poll_switch.poll_switch,
                (user.email, switch_ip, switch_credentials)
            )
        pool.close()
        pool.join()
    except Exception as error:
        # Best effort: log and swallow so the periodic daemon loop survives
        # a failed polling round.
        logging.error('failed to poll switches %s', poll_switches)
        logging.exception(error)


if __name__ == '__main__':
    # Standard compass daemon boot sequence: parse flags, configure
    # logging, connect the database, then daemonize the polling callback.
    flags.init()
    logsetting.init()
    database.init()
    logging.info('run poll_switch')
    daemonize.daemonize(
        functools.partial(
            pollswitches,
            # --switch_ips is a comma-separated list; drop empty entries.
            [switch_ip
             for switch_ip in flags.OPTIONS.switch_ips.split(',')
             if switch_ip]),
        flags.OPTIONS.run_interval,
        pidfile=lockfile.FileLock('/var/run/poll_switch.pid'),
        stderr=open('/tmp/poll_switch_err.log', 'w+'),
        stdout=open('/tmp/poll_switch_out.log', 'w+'))
flags.add('run_interval',
          help='run interval in seconds',
          default=setting.PROGRESS_UPDATE_INTERVAL)


def progress_update(cluster_hosts):
    """Entry function: update installing progress for the given clusters.

    :param cluster_hosts: mapping of cluster id to the hosts to update.

    When the 'async' flag is set, the work is dispatched to the celery
    task queue; otherwise it runs inline. Inline failures are logged and
    swallowed so the periodic daemon loop keeps running.
    """
    # 'async' became a reserved keyword in Python 3.7, so the flag value
    # must be read with getattr() instead of dotted attribute access
    # (flags.OPTIONS.async is a SyntaxError on modern Python).
    if getattr(flags.OPTIONS, 'async'):
        celery.send_task('compass.tasks.update_progress', (cluster_hosts,))
    else:
        try:
            update_progress.update_progress(cluster_hosts)
        except Exception as error:
            # Best effort: a failed update must not kill the daemon.
            logging.error('failed to update progress for cluster_hosts: %s',
                          cluster_hosts)
            logging.exception(error)


if __name__ == '__main__':
    # Daemon boot sequence: parse flags, configure logging, then loop
    # progress_update at --run_interval seconds under a pid lock.
    flags.init()
    logsetting.init()
    logging.info('run progress update')
    daemonize.daemonize(
        functools.partial(
            progress_update,
            util.get_clusters_from_str(flags.OPTIONS.clusters)),
        flags.OPTIONS.run_interval,
        pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
        stderr=open('/tmp/progress_update_err.log', 'w+'),
        stdout=open('/tmp/progress_update_out.log', 'w+'))
flags.add_bool('async',
               help='run in async mode',
               default=True)
flags.add('run_interval',
          help='run interval in seconds',
          default=setting.PROGRESS_UPDATE_INTERVAL)


def progress_update(cluster_hosts):
    """Entry function: update installing progress for the given clusters.

    :param cluster_hosts: mapping of cluster id to the hosts to update.

    In async mode the update is delegated to the celery task queue;
    otherwise it runs inline, logging (not raising) any failure so the
    periodic daemon loop survives.
    """
    # 'async' is a reserved keyword since Python 3.7; the original dotted
    # access "flags.OPTIONS. async" is a SyntaxError on modern Python, so
    # the flag has to be fetched with getattr().
    if getattr(flags.OPTIONS, 'async'):
        celery.send_task('compass.tasks.update_progress', (cluster_hosts,))
    else:
        try:
            update_progress.update_progress(cluster_hosts)
        except Exception as error:
            # Best effort: log and continue.
            logging.error('failed to update progress for cluster_hosts: %s',
                          cluster_hosts)
            logging.exception(error)


if __name__ == '__main__':
    # Daemon boot sequence: parse flags, configure logging, then loop
    # progress_update at --run_interval seconds under a pid lock.
    flags.init()
    logsetting.init()
    logging.info('run progress update')
    daemonize.daemonize(
        functools.partial(
            progress_update,
            util.get_clusters_from_str(flags.OPTIONS.clusters)),
        flags.OPTIONS.run_interval,
        pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
        stderr=open('/tmp/progress_update_err.log', 'w+'),
        stdout=open('/tmp/progress_update_out.log', 'w+'))
# NOTE(review): the opening of this flags.add_bool('async', ...) call is
# above this chunk; only its keyword-argument tail is visible here —
# confirm against the full file.
               help='run in async mode', default=True)
flags.add('run_interval', type='int',
          help='run interval in seconds',
          default=setting.PROGRESS_UPDATE_INTERVAL)


def progress_update():
    """Entry function: update installing progress for all clusters.

    In async mode the update is delegated to the celery task queue;
    otherwise it runs inline, logging (not raising) any failure so the
    periodic daemon loop survives.
    """
    # NOTE(review): 'async' is a reserved keyword since Python 3.7, so
    # this dotted access is a SyntaxError on modern Python; it would need
    # getattr(flags.OPTIONS, 'async') to stay portable.
    if flags.OPTIONS.async:
        celery.send_task('compass.tasks.update_progress', ())
    else:
        try:
            update_progress.update_progress()
        except Exception as error:
            # Best effort: log and continue.
            logging.error('failed to update progress')
            logging.exception(error)


if __name__ == '__main__':
    # Daemon boot sequence: parse flags, configure logging, connect the
    # database, then loop progress_update at --run_interval seconds.
    flags.init()
    logsetting.init()
    database.init()
    logging.info('run progress update')
    daemonize.daemonize(
        progress_update,
        flags.OPTIONS.run_interval,
        pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
        stderr=open('/tmp/progress_update_err.log', 'w+'),
        stdout=open('/tmp/progress_update_out.log', 'w+'))
flags.add_bool('async',
               help='run in async mode',
               default=True)
flags.add('run_interval', type='int',
          help='run interval in seconds',
          default=setting.PROGRESS_UPDATE_INTERVAL)


def progress_update():
    """Entry function: update installing progress for all clusters.

    In async mode the update is delegated to the celery task queue;
    otherwise it runs inline, logging (not raising) any failure so the
    periodic daemon loop survives.
    """
    # 'async' is a reserved keyword since Python 3.7; the original dotted
    # access "flags.OPTIONS. async" is a SyntaxError on modern Python, so
    # the flag has to be fetched with getattr().
    if getattr(flags.OPTIONS, 'async'):
        celery.send_task('compass.tasks.update_progress', ())
    else:
        try:
            update_progress.update_progress()
        except Exception as error:
            # Best effort: log and continue.
            logging.error('failed to update progress')
            logging.exception(error)


if __name__ == '__main__':
    # Daemon boot sequence: parse flags, configure logging, connect the
    # database, then loop progress_update at --run_interval seconds
    # under a pid lock.
    flags.init()
    logsetting.init()
    database.init()
    logging.info('run progress update')
    daemonize.daemonize(
        progress_update,
        flags.OPTIONS.run_interval,
        pidfile=lockfile.FileLock('/var/run/progress_update.pid'),
        stderr=open('/tmp/progress_update_err.log', 'w+'),
        stdout=open('/tmp/progress_update_out.log', 'w+'))