def main(args):
    config.load_config()

    if args['create']:
        con = get_connector(args['<connector>'])
        auth = con.authenticate()
        config.write_auth(args['<connector>'], args['--label'], auth)

    elif args['delete']:
        config.remove_auth(args['<connector>'], args['--label'])
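The bracketed keys (args['<connector>'], args['--label']) suggest a docopt-style argument dictionary. A minimal sketch of how such a dict could be produced, assuming a docopt front end (the usage string here is hypothetical):

from docopt import docopt

USAGE = """connectors

Usage:
  connectors create <connector> [--label=<label>]
  connectors delete <connector> [--label=<label>]
"""

if __name__ == '__main__':
    main(docopt(USAGE))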
Example #2
    def __init__(self, args):
        self.args = args
        self.cfg = load_config(args.cfg_name)

        self.converter = LabelConverter(chars_file=args.chars_file)

        self.tr_ds = Dataset(self.cfg, args.train_dir, args.train_gt_dir,
                             self.converter, self.cfg.batch_size)

        self.cfg.lr_boundaries = [self.tr_ds.num_batches * epoch for epoch in self.cfg.lr_decay_epochs]
        self.cfg.lr_values = [self.cfg.lr * (self.cfg.lr_decay_rate ** i) for i in
                              range(len(self.cfg.lr_boundaries) + 1)]

        if args.val_dir is None:
            self.val_ds = None
        else:
            self.val_ds = Dataset(self.cfg, args.val_dir, args.val_gt_dir,
                                  self.converter, self.cfg.batch_size, shuffle=False)

        if args.test_dir is None:
            self.test_ds = None
        else:
            # Test images often have different size, so set batch_size to 1
            self.test_ds = Dataset(self.cfg, args.test_dir, args.test_gt_dir,
                                   self.converter, shuffle=False, batch_size=1)

        self.model = ResNetV2(self.cfg, self.converter.num_classes)
        self.model.create_architecture()
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        self.epoch_start_index = 0
        self.batch_start_index = 0
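The schedule arithmetic above follows the piecewise-constant convention: n boundaries pair with n+1 learning-rate values. A worked sketch with assumed numbers:

# Assumed numbers, for illustration only:
num_batches = 100            # batches per epoch
lr, lr_decay_rate = 0.01, 0.1
lr_decay_epochs = [10, 20]   # decay at these epochs

lr_boundaries = [num_batches * e for e in lr_decay_epochs]
# -> [1000, 2000] (global steps at which the rate drops)
lr_values = [lr * lr_decay_rate ** i for i in range(len(lr_boundaries) + 1)]
# -> [0.01, 0.001, 0.0001]: one more value than boundaries, as required by
# consumers such as tf.train.piecewise_constant(step, boundaries, values)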
Example #3
    def __init__(self, identity: str, on_disconnect=None):
        config = load_config()
        self.host = config['remote_host']
        self.port = config['remote_port']
        self.connection = None
        self.on_disconnect = on_disconnect
        self.identity = identity
        self.connected = False
Example #4
    def do_work(self):
        """Main body of the task, run over and over again continuously
        """

        conf = load_config('/app.conf')
        # get the next job from the configured queue
        queue = Queue(conf)
        task = queue.lease()

        # process the job, building a container if required
        task.run()
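Hypothetical stand-ins for the Queue/Task contract the loop relies on; the names (Queue, lease, run) mirror the snippet, but the bodies are illustrative only:

class Task:
    def __init__(self, payload):
        self.payload = payload

    def run(self):
        # A real task would build/run a container here.
        print('processing', self.payload)

class Queue:
    def __init__(self, conf):
        # Assumes conf is dict-like; a real queue would connect to a broker.
        self._jobs = list(conf.get('jobs', []))

    def lease(self):
        # A real lease() would block or poll with a visibility timeout.
        return Task(self._jobs.pop(0))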
Example #5
def main(opts: Namespace) -> int:
    config: ApplicationConfig = load_config(opts.config)

    app = App(config)
    app.setup_routes()

    try:
        app.run_app()

        return 0
    except Exception:
        return 1
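Because main returns an exit code, wiring it to a CLI is one line; a hedged sketch (the --config flag is an assumption based on opts.config):

import sys
from argparse import ArgumentParser

def parse_opts():
    parser = ArgumentParser()
    parser.add_argument('--config', required=True)  # assumed flag name
    return parser.parse_args()

if __name__ == '__main__':
    sys.exit(main(parse_opts()))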
Example #6
def main():
    load_config()
    write_process_number()

    Notify.init('Notifyhub')

    signal.signal(signal.SIGRTMIN, on_prev_notification)
    signal.signal(signal.SIGRTMIN + 1, on_next_notification)
    signal.signal(signal.SIGRTMIN + 2, on_prev_connection)
    signal.signal(signal.SIGRTMIN + 3, on_next_connection)
    signal.signal(signal.SIGRTMIN + 4, on_mark_as_read)

    loop = get_event_loop()

    for (name, label, config) in list_auths():
        conn = make_connection(name, label, config)
        app.add_connection(conn)

        loop.run_in_executor(None, partial(start_update_loop, conn.id))

    loop.run_forever()
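The SIGRTMIN+n handlers above are driven from outside the process; a hedged sketch of sending one, assuming write_process_number() stored the pid in a file (the path is an assumption):

import os
import signal

def send_next_notification(pid_file='/tmp/notifyhub.pid'):  # assumed path
    with open(pid_file) as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGRTMIN + 1)  # dispatches on_next_notification above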
Example #7
def main(args):
    config = load_config(args.config_file)

    stop_event = threading.Event()
    def _sig_handler(signo, frame):
        stop_event.set()
    signal.signal(signal.SIGINT, _sig_handler)
    signal.signal(signal.SIGTERM, _sig_handler)

    from components.capture import AudioCaptureTask
    from components.lights import LightOutputTask
    from components.mapper import MapperTask
    from components.webgui import WebGUITask
    from components.network import NetworkTask

    fps = FPSCounter('All')

    # Initialize tasks first so that the dict can be passed to all tasks, giving them access to other tasks
    tasks = {}
    tasks['audio'] = AudioCaptureTask(tasks, config)
    tasks['mapper'] = MapperTask(tasks, config)
    tasks['lights'] = LightOutputTask(tasks, config)
    tasks['webgui'] = WebGUITask(tasks, config)
    tasks['network'] = NetworkTask(tasks, config)

    try:
        for task in tasks.values():
            task.setup()

        while not stop_event.is_set():
            with fps:
                data = {}
                for t in tasks.values():
                    try:
                        t.run(data)
                    except Exception:
                        logger.error("Failure in %s", t.__class__.__name__, exc_info=True)
    finally:
        # print("Exiting")
        for t in tasks.values():
            # print("Stop " + t.__class__.__name__)
            try:
                t.teardown()
            except Exception:
                logger.error("Failed to tear down %s", t.__class__.__name__, exc_info=True)
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-env',
                        help="config environment",
                        dest="config_env",
                        default="production")
    parser.add_argument('--force',
                        help="force bootstrap of account even if files exist",
                        dest="force",
                        action="store_true",
                        default=False)
    parser.add_argument('--debug',
                        help="set debug level (0-4)",
                        dest="debug",
                        nargs="?",
                        const=0,
                        type=int)
    parser.add_argument('--noop',
                        help="only print actions, make no changes",
                        dest="noop",
                        action="store_true",
                        default=False)
    parser.add_argument('--report',
                        help="generate report of what will be done",
                        dest="report",
                        action="store_true",
                        default=False)
    parser.add_argument('--report-space',
                        help="report on space that can be removed",
                        dest="report_space",
                        action="store_true",
                        default=False)
    parser.add_argument('--account',
                        help="account to create",
                        dest="account",
                        default=None)
    parser.add_argument('--exclude-accounts',
                        nargs="+",
                        help="accounts to exclude",
                        dest="exclude_accounts",
                        default=[])
    args = parser.parse_args()
    options = vars(args)

    # Set values based on loaded config
    config = load_config()
    _auth_token = config[args.config_env].get("api_auth_token")
    _account_home_config = config[args.config_env].get("account_home")
    _cleanup_exclude = _account_home_config.get("cleanup_exclude",
                                                []) + args.exclude_accounts
    _host = config[args.config_env].get("host")
    _port = config[args.config_env].get("port")
    _https = config[args.config_env].get("https")
    _protocol = 'https' if _https else 'http'
    _url = "%s://%s:%s/" % (
        _protocol, _host, _port) if _port else "%s://%s/" % (_protocol, _host)
    _json_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": "Token token=%s" % _auth_token,
    }

    # Setup logging
    setup_logging(debug=args.debug, noop=args.noop)

    logger.debug4("OPTIONS: %s" % options)
    logger.debug4("CONFIG: %s" % config)

    # Get status ID
    status = actmgr_api.get_status(url=_url,
                                   headers=_json_headers,
                                   name='CLOSED')
    logger.debug1("STATUS: %s", status)
    status_id = status.get("id")

    # Get accounts and perform account cleanup steps
    if args.account:
        accounts = actmgr_api.get_accounts(url=_url,
                                           headers=_json_headers,
                                           params={
                                               "username": args.account,
                                               "status_id": status_id
                                           })
    else:
        accounts = actmgr_api.get_accounts(url=_url,
                                           headers=_json_headers,
                                           params={"status_id": status_id})
    logger.debug4("Number of accounts returned: %s", len(accounts))

    _report = []
    for account in accounts:
        logger.debug4("Account data: %s", json.dumps(account))
        _username = account["username"]
        if _username in _cleanup_exclude:
            logger.info("EXCLUDED: %s", _username)
            continue
        try:
            _shell = getpwnam(_username).pw_shell
        except KeyError:
            logger.warn("Unable to get shell for %s", _username)
            _shell = None
        if _shell != '/sbin/nologin':
            logger.warn("User %s shell %s != /sbin/nologin", _username, _shell)
            continue

        _account_home = AccountHome(username=_username,
                                    config=_account_home_config,
                                    options=options)
        _slurm_account = SlurmAccount(username=_username, options=options)

        if args.report:
            _account_home.check_path_owner(_account_home.home)
            _account_home.check_path_owner(_account_home.scratch)
            for _dir in _account_home.extra_directories:
                _account_home.check_path_owner(_dir)
            _data = {}
            _data["username"] = _username
            _data["HOME"] = _account_home.home_exists()
            _data["SCRATCH"] = _account_home.scratch_exists()
            _data["EXTRA"] = _account_home.extra_directories
            _data["SLURM"] = _slurm_account.exists()
            if args.report_space:
                _data["HOME_USED"] = get_space_used(
                    host=_account_home_config["server"],
                    path=_account_home.home)
                _data["SCRATCH_USED"] = get_space_used(
                    path=_account_home.scratch)
                _data["EXTRA_USED"] = 0
                for _dir in _account_home.extra_directories:
                    _data["EXTRA_USED"] += get_space_used(path=_dir)
            _report.append(_data)
        else:
            _account_home.cleanup()
            _slurm_account.delete()
    if args.report:
        if args.report_space:
            table = prettytable.PrettyTable([
                "Username", "HOME", "HOME-USED", "SCRATCH", "SCRATCH-USED",
                "EXTRA", "EXTRA-USED", "SLURM"
            ])
        else:
            table = prettytable.PrettyTable(
                ["Username", "HOME", "SCRATCH", "EXTRA", "SLURM"])
        table.hrules = prettytable.FRAME
        _home_total = 0
        _home_used_total = 0
        _scratch_total = 0
        _scratch_used_total = 0
        _extra_total = 0
        _extra_used_total = 0
        _slurm_total = 0
        for r in sorted(_report, key=lambda k: k["username"]):
            _home = r["HOME"]
            _scratch = r["SCRATCH"]
            _extra = r["EXTRA"]
            _slurm = r["SLURM"]
            if _home:
                _home_total += 1
            if _scratch:
                _scratch_total += 1
            if _extra:
                _extra_total += len(_extra)
            if _slurm:
                _slurm_total += 1
            if args.report_space:
                _home_used = bytes2human(r["HOME_USED"])
                _home_used_total += r["HOME_USED"]
                _scratch_used = bytes2human(r["SCRATCH_USED"])
                _scratch_used_total += r["SCRATCH_USED"]
                _extra_used = bytes2human(r["EXTRA_USED"])
                _extra_used_total += r["EXTRA_USED"]
                table.add_row([
                    r["username"], _home, _home_used, _scratch, _scratch_used,
                    "\n".join(_extra), _extra_used, _slurm
                ])
            else:
                table.add_row([
                    r["username"], _home, _scratch, "\n".join(_extra), _slurm
                ])
        if args.report_space:
            table.add_row(["", "", "", "", "", "", "", ""])
            table.add_row([
                "Total", _home_total,
                bytes2human(_home_used_total), _scratch_total,
                bytes2human(_scratch_used_total), _extra_total,
                bytes2human(_extra_used_total), _slurm_total
            ])
        else:
            table.add_row(["", "", "", "", ""])
            table.add_row([
                "Total", _home_total, _scratch_total, _extra_total,
                _slurm_total
            ])
        print(table)
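A hedged stand-in for the bytes2human() helper used in the report above; the real helper's exact rounding and unit labels are assumptions:

def bytes2human(n):
    for unit in ('B', 'K', 'M', 'G', 'T', 'P'):
        if abs(n) < 1024.0:
            return '%3.1f%s' % (n, unit)
        n /= 1024.0
    return '%.1fE' % n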
Example #9
                        help='Host to listen on.')
    parser.add_argument('--port',
                        default=9002,
                        type=int,
                        help='Port to listen on.')
    parser.add_argument('-s', '--settings', type=str, help='Settings file.')

    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if args.settings:
        load_config(args.settings)

    required = 'iso-dir', 'hdd-dir', 'vms-dir'
    for row in required:
        if not config(row):
            log.error(
                'The %r value is missing in your configuration! '
                'Please provide it and run VBoxRPC again.', row)
            exit(1)

        path = config(row)
        if not os.path.isdir(path):
            log.info('Creating directory %r', path)
            os.makedirs(path)

    app = create_app(debug=args.debug)
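Note that config is used here as a callable accessor rather than a dict; a hedged stand-in, assuming load_config() populates module-level settings:

_SETTINGS = {}  # filled by load_config() in this sketch

def config(key):
    return _SETTINGS.get(key)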
Example #10
    version = ''
    env = ''
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit(0)
        if opt in ('-c', '--config'):
            config.set_filepath(arg)
        if opt in ('-e', '--environment'):
            env = arg
        if opt in ('-v', '--version'):
            version = arg

    # load config and environment data
    config.set_script_directory(os.path.dirname(os.path.realpath(__file__)))
    config.load_config()
    config.set_env(env)

    db = database.DB(config.get_env())
    # parse command
    for cmd in args:
        if cmd in ("initial_migration"):
            migrations.create_initial_migration()
            break
        elif cmd in ("new_migration"):
            migrations.create_new_migration()
            break
        elif cmd in ("new_seed"):
            seeds.create_seed()
            break
        elif cmd in ("db:create"):
Example #11
import os

from torch.utils.data import DataLoader

from lib.models import load_model
from lib.config import load_config
from lib.losses import load_criterion
from lib.optimizers import load_optimizer
from lib.training import cycle, save_state
from lib.logging import get_summary_writer
from lib.datasets import ECGDataset, collate_longest_in_batch

CONFIG = "./experiments/001.yaml"

if __name__ == "__main__":
    cfg, model_dir, vis_dir, log_dir = load_config(CONFIG)

    for fold in range(1, cfg['data']['n_folds']):

        # Data
        ds_train = ECGDataset(cfg, 'train', fold)
        ds_test = ECGDataset(cfg, 'test', fold)
        dl_train = DataLoader(ds_train,
                              cfg['training']['batch_size'],
                              shuffle=True,
                              num_workers=cfg['training']['n_workers'],
                              pin_memory=True,
                              collate_fn=collate_longest_in_batch)
        dl_test = DataLoader(ds_test,
                             cfg['training']['batch_size'],
                             shuffle=False,
Example #12
import os
import time
import datetime

from selenium import webdriver

from xml.etree import ElementTree

from lib.config import load_config

from lib.db import db_connect, db_close, insert_item, item_exists_in_db

from lib.item import Item, clean_link, format_link

from lib.parser import magic_decoding

CONFIG = load_config().get('feeder')

LIB_DIR_ABSPATH = os.path.dirname(os.path.abspath(__file__))
RSS_SOURCES_FILEPATH = os.path.join(LIB_DIR_ABSPATH, '../rss_sources.json')
PHANTOM_JS_DRIVER_PATH = os.path.join(LIB_DIR_ABSPATH, '../phantomjs')
PHANTOM_JS_DRIVER_CAPS = dict()
PHANTOM_JS_DRIVER_ARGS = CONFIG.get('PHANTOM_JS_DRIVER_ARGS')
PAGE_LOAD_TIMEOUT = CONFIG.get('PAGE_LOAD_TIMEOUT')
DEFAULT_USERNAME = CONFIG.get('DEFAULT_USERNAME')
MOBILE_USER_AGENT = CONFIG.get('MOBILE_USER_AGENT')
DESKTOP_USER_AGENT = CONFIG.get('DESKTOP_USER_AGENT')

def get_content(driver, url):
    """
        Fetch web content from an url.
Example #13
def main():
    # Initialize the logging library.
    import logging
    import os
    import pprint
    import sys

    import lib.logcontrol
    lib.logcontrol.init_logging()
    logger = logging.getLogger("apiclient")

    # Load up the configuration, this includes parsing any command line
    # arguments.
    import lib.config as config
    config.CONFIG = config.load_config()
    if "verbosity" in config.CONFIG:
        lib.logcontrol.set_level(config.CONFIG["verbosity"])
    logger.debug(
        "Final configuration dictionary...\n%s",
        pprint.pformat(config.CONFIG, width=72)
    )
    lib.logcontrol.show_tracebacks = config.CONFIG["show-tracebacks"]

    # Set to True by any of the "do something and exit" options.
    exit_now = False

    # If the user wants to logout...
    if config.CONFIG.get("logout"):
        session_path = config.CONFIG["session-path"]
        if os.path.isfile(session_path):
            logger.info("Deleting session file at %s.", session_path)
            os.remove(session_path)
        else:
            logger.info(
                "No session file exists at %s. Doing nothing.",
                session_path
            )

        exit_now = True

    # If the user wants to clear the cache...
    if config.CONFIG.get("clear-api-info"):
        api_info_path = config.CONFIG["api-info-path"]
        if os.path.isfile(api_info_path):
            logger.info("Deleting session file at %s.", api_info_path)
            os.remove(api_info_path)
        else:
            logger.info(
                "No session file exists at %s. Doing nothing.",
                api_info_path
            )

        exit_now = True

    # If the user wants to save their configuration
    if config.CONFIG.get("save"):
        import lib.utils

        save_to = config.CONFIG.get("config", config.DEFAULT_CONFIG_PATHS[0])
        save_to = lib.utils.resolve_path(save_to)

        logger.info("Saving configuration settings to %s.", save_to)

        serialized_config = lib.config.dump_config()
        logger.debug("Configuration...\n%s", serialized_config)

        try:
            config_dir = os.path.dirname(save_to)
            if not os.path.exists(config_dir):
                logger.debug("Creating %s directory.", config_dir)
                lib.utils.prepare_directory(config_dir)

            with open(save_to, "w") as f:
                f.write(serialized_config)
        except IOError:
            logger.critical(
                "Could not save configuration.", exc_info = sys.exc_info()
            )
            sys.exit(1)

        logger.info("Successfully saved configuration settings.")

        exit_now = True

    if exit_now:
        sys.exit(0)

    # Grab the user's old session information if they are already logged in.
    import lib.communicate
    session = lib.communicate.APIClientSession()
    session.load()

    save_session = False

    # Login if necessary
    import lib.ui
    if session.user is None:
        if config.CONFIG.get("use-oauth"):
            session.login_oauth2()
        else:
            session.login(*lib.ui.determine_credentials())

        save_session = True

    # Request the API info from the server if we don't have it cached
    if session.api_info is None:
        session.fetch_api_info()
        save_session = True

    # Save the session if we had to login or if we replenished our cache
    # (because they are tied together artificially by our design).
    if save_session:
        session.save()

    # Enter the shell or execute a command.
    if config.CONFIG.get("shell"):
        import lib.shell
        new_shell = lib.shell.APIShell(session)

        try:
            new_shell.cmdloop()
        except KeyboardInterrupt:
            while True:
                try:
                    new_shell.cmdloop("")
                except KeyboardInterrupt:
                    print "\nInterrupted..."
                else:
                    break

        print "Exiting..."
    else:
        # Perform the command the user wants to execute
        command_args, command_kwargs = lib.ui.parse_raw_args(config.ARGS)

        if command_args:
            session.call(command_args[0], *command_args[1:], **command_kwargs)
        else:
            logger.info("No command given. Doing nothing...")
Example #14
import os
import sys
import base64
import ConfigParser
import requests
import json
import logging
import re
from urlparse import urljoin
import prettytable

BASE_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
from lib.config import load_config
from lib.logs import setup_logging

configs = load_config()
config = configs['production']['pulp']

hostname = config.get("hostname", "localhost")
username = config.get("username", "admin")
password = config.get("password", "password")

logger = logging.getLogger()
setup_logging()

auth_str = "%s:%s" % (username, password)
auth_encoded = base64.b64encode(auth_str)
auth = "Basic %s" % auth_encoded

rest_headers = {
    'Content-Type': 'application/json',
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-env', help="config environment", dest="config_env", default="production")
    parser.add_argument('--force', help="force bootstrap of account even if files exist", dest="force", action="store_true", default=False)
    parser.add_argument('--debug', help="set debug level (0-4)", dest="debug", nargs="?", const=0, type=int)
    parser.add_argument('--noop', help="only print actions, make no changes", dest="noop", action="store_true", default=False)
    parser.add_argument('--report', help="generate report of what will be done", dest="report", action="store_true", default=False)
    parser.add_argument('--report-space', help="report on space that can be removed", dest="report_space", action="store_true", default=False)
    parser.add_argument('--account', help="account to create", dest="account", default=None)
    parser.add_argument('--exclude-accounts', nargs="+", help="accounts to exclude", dest="exclude_accounts", default=[])
    args = parser.parse_args()
    options = vars(args)

    # Set values based on loaded config
    config = load_config()
    _auth_token = config[args.config_env].get("api_auth_token")
    _account_home_config = config[args.config_env].get("account_home")
    _cleanup_exclude = _account_home_config.get("cleanup_exclude", []) + args.exclude_accounts
    _host = config[args.config_env].get("host")
    _port = config[args.config_env].get("port")
    _https = config[args.config_env].get("https")
    _protocol = 'https' if _https else 'http'
    _url = "%s://%s:%s/" % (_protocol, _host, _port) if _port else "%s://%s/" % (_protocol, _host)
    _json_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": "Token token=%s" % _auth_token,
    }

    # Setup logging
    setup_logging(debug=args.debug, noop=args.noop)

    logger.debug4("OPTIONS: %s" % options)
    logger.debug4("CONFIG: %s" % config)

    # Get status ID
    status = actmgr_api.get_status(url=_url, headers=_json_headers, name='CLOSED')
    logger.debug1("STATUS: %s", status)
    status_id = status.get("id")

    # Get accounts and perform account cleanup steps
    if args.account:
        accounts = actmgr_api.get_accounts(url=_url, headers=_json_headers, params={"username": args.account, "status_id": status_id})
    else:
        accounts = actmgr_api.get_accounts(url=_url, headers=_json_headers, params={"status_id": status_id})
    logger.debug4("Number of accounts returned: %s", len(accounts))

    _report = []
    for account in accounts:
        logger.debug4("Account data: %s", json.dumps(account))
        _username = account["username"]
        if _username in _cleanup_exclude:
            logger.info("EXCLUDED: %s", _username)
            continue
        try:
            _shell = getpwnam(_username).pw_shell
        except KeyError:
            logger.warn("Unable to get shell for %s", _username)
            _shell = None
        if _shell != '/sbin/nologin':
            logger.warn("User %s shell %s != /sbin/nologin", _username, _shell)
            continue

        _account_home = AccountHome(username=_username, config=_account_home_config, options=options)
        _slurm_account = SlurmAccount(username=_username, options=options)

        if args.report:
            _account_home.check_path_owner(_account_home.home)
            _account_home.check_path_owner(_account_home.scratch)
            for _dir in _account_home.extra_directories:
                _account_home.check_path_owner(_dir)
            _data = {}
            _data["username"] = _username
            _data["HOME"] = _account_home.home_exists()
            _data["SCRATCH"] = _account_home.scratch_exists()
            _data["EXTRA"] = _account_home.extra_directories
            _data["SLURM"] = _slurm_account.exists()
            if args.report_space:
                _data["HOME_USED"] = get_space_used(host=_account_home_config["server"], path=_account_home.home)
                _data["SCRATCH_USED"] = get_space_used(path=_account_home.scratch)
                _data["EXTRA_USED"] = 0
                for _dir in _account_home.extra_directories:
                    _data["EXTRA_USED"] += get_space_used(path=_dir)
            _report.append(_data)
        else:
            _account_home.cleanup()
            _slurm_account.delete()
    if args.report:
        if args.report_space:
            table = prettytable.PrettyTable(["Username", "HOME", "HOME-USED", "SCRATCH", "SCRATCH-USED", "EXTRA", "EXTRA-USED", "SLURM"])
        else:
            table = prettytable.PrettyTable(["Username", "HOME", "SCRATCH", "EXTRA", "SLURM"])
        table.hrules = prettytable.FRAME
        _home_total = 0
        _home_used_total = 0
        _scratch_total = 0
        _scratch_used_total = 0
        _extra_total = 0
        _extra_used_total = 0
        _slurm_total = 0
        for r in sorted(_report, key=lambda k: k["username"]):
            _home = r["HOME"]
            _scratch = r["SCRATCH"]
            _extra = r["EXTRA"]
            _slurm = r["SLURM"]
            if _home:
                _home_total += 1
            if _scratch:
                _scratch_total += 1
            if _extra:
                _extra_total += len(_extra)
            if _slurm:
                _slurm_total += 1
            if args.report_space:
                _home_used = bytes2human(r["HOME_USED"])
                _home_used_total += r["HOME_USED"]
                _scratch_used = bytes2human(r["SCRATCH_USED"])
                _scratch_used_total += r["SCRATCH_USED"]
                _extra_used = bytes2human(r["EXTRA_USED"])
                _extra_used_total += r["EXTRA_USED"]
                table.add_row([r["username"], _home, _home_used, _scratch, _scratch_used, "\n".join(_extra), _extra_used, _slurm])
            else:
                table.add_row([r["username"], _home, _scratch, "\n".join(_extra), _slurm])
        if args.report_space:
            table.add_row(["", "", "", "", "", "", "", ""])
            table.add_row(["Total", _home_total, bytes2human(_home_used_total), _scratch_total, bytes2human(_scratch_used_total), _extra_total, bytes2human(_extra_used_total), _slurm_total])
        else:
            table.add_row(["", "", "", "", ""])
            table.add_row(["Total", _home_total, _scratch_total, _extra_total, _slurm_total])
        print(table)
Example #16
def main():
    # Initialize the logging library.
    import logging
    import os
    import pprint
    import sys

    import lib.logcontrol
    lib.logcontrol.init_logging()
    logger = logging.getLogger("apiclient")

    # Load up the configuration, this includes parsing any command line
    # arguments.
    import lib.config as config
    config.CONFIG = config.load_config()
    if "verbosity" in config.CONFIG:
        lib.logcontrol.set_level(config.CONFIG["verbosity"])
    logger.debug("Final configuration dictionary...\n%s",
                 pprint.pformat(config.CONFIG, width=72))
    lib.logcontrol.show_tracebacks = config.CONFIG["show-tracebacks"]

    # Set to True by any of the "do something and exit" options.
    exit_now = False

    # If the user wants to logout...
    if config.CONFIG.get("logout"):
        session_path = config.CONFIG["session-path"]
        if os.path.isfile(session_path):
            logger.info("Deleting session file at %s.", session_path)
            os.remove(session_path)
        else:
            logger.info("No session file exists at %s. Doing nothing.",
                        session_path)

        exit_now = True

    # If the user wants to clear the cache...
    if config.CONFIG.get("clear-api-info"):
        api_info_path = config.CONFIG["api-info-path"]
        if os.path.isfile(api_info_path):
            logger.info("Deleting session file at %s.", api_info_path)
            os.remove(api_info_path)
        else:
            logger.info("No session file exists at %s. Doing nothing.",
                        api_info_path)

        exit_now = True

    # If the user wants to save their configuration
    if config.CONFIG.get("save"):
        import lib.utils

        save_to = config.CONFIG.get("config", config.DEFAULT_CONFIG_PATHS[0])
        save_to = lib.utils.resolve_path(save_to)

        logger.info("Saving configuration settings to %s.", save_to)

        serialized_config = lib.config.dump_config()
        logger.debug("Configuration...\n%s", serialized_config)

        try:
            config_dir = os.path.dirname(save_to)
            if not os.path.exists(config_dir):
                logger.debug("Creating %s directory.", config_dir)
                lib.utils.prepare_directory(config_dir)

            with open(save_to, "w") as f:
                f.write(serialized_config)
        except IOError:
            logger.critical("Could not save configuration.",
                            exc_info=sys.exc_info())
            sys.exit(1)

        logger.info("Successfully saved configuration settings.")

        exit_now = True

    if exit_now:
        sys.exit(0)

    # Grab the user's old session information if they are already logged in.
    import lib.communicate
    session = lib.communicate.APIClientSession()
    session.load()

    save_session = False

    # Login if necessary
    import lib.ui
    if session.user is None:
        if config.CONFIG.get("use-oauth"):
            session.login_oauth2()
        else:
            session.login(*lib.ui.determine_credentials())

        save_session = True

    # Request the API info from the server if we don't have it cached
    if session.api_info is None:
        session.fetch_api_info()
        save_session = True

    # Save the session if we had to login or if we replenished our cache
    # (because they are tied together artificially by our design).
    if save_session:
        session.save()

    # Enter the shell or execute a command.
    if config.CONFIG.get("shell"):
        import lib.shell
        new_shell = lib.shell.APIShell(session)

        try:
            new_shell.cmdloop()
        except KeyboardInterrupt:
            while True:
                try:
                    new_shell.cmdloop("")
                except KeyboardInterrupt:
                    print "\nInterrupted..."
                else:
                    break

        print "Exiting..."
    else:
        # Perform the command the user wants to execute
        command_args, command_kwargs = lib.ui.parse_raw_args(config.ARGS)

        if command_args:
            session.call(command_args[0], *command_args[1:], **command_kwargs)
        else:
            logger.info("No command given. Doing nothing...")
Example #17
from lib.config import load_config
from lib.ObjectDetection import ObjectDetection

object_detection = ObjectDetection(load_config())
object_detection.start()
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', help="User's username", required=True)
    parser.add_argument('--new-group', help="New group to assign", required=True)
    parser.add_argument('--old-group', help="Old group", required=True)
    parser.add_argument('--config-env', help="config environment", dest="config_env", default="production")
    parser.add_argument('--debug', help="set debug level (0-4)", dest="debug", nargs="?", const=0, type=int)
    args = parser.parse_args()
    options = vars(args)

    config = load_config()
    config_env = config[args.config_env]["ldap"]
    _account_home_config = config[args.config_env].get("account_home")
    _auth_token = config[args.config_env].get("api_auth_token")
    _host = config[args.config_env].get("host")
    _port = config[args.config_env].get("port")
    _https = config[args.config_env].get("https")
    _protocol = 'https' if _https else 'http'
    _url = "%s://%s:%s/" % (_protocol, _host, _port) if _port else "%s://%s/" % (_protocol, _host)
    _json_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": "Token token=%s" % _auth_token,
    }

    # Setup logging
    setup_logging(debug=args.debug, noop=False)

    logger.debug4("OPTIONS: %s" % options)
    logger.debug4("CONFIG: %s" % config_env)

    _ldap_url = config_env.get("url")
    _use_tls = config_env.get("tls")
    _bind_dn = config_env.get("bind_dn", None)
    _bind_pass = config_env.get("bind_pass", None)

    group_search_base = "ou=Groups,dc=brazos,dc=tamu,dc=edu"
    user_search_base = "ou=People,dc=brazos,dc=tamu,dc=edu"
    new_group_filter = "cn=%s" % args.new_group
    old_group_filter = "cn=%s" % args.old_group
    user_filter = "uid=%s" % args.username
    group_attribs = [
        "dn",
        "cn",
        "gidNumber",
        "uniqueMember",
        "slurmAccountName",
    ]
    user_attribs = [
        "dn",
        "uid",
        "gidNumber",
    ]
    scope = "one"

    local_ldap = LocalLdap(url=_ldap_url[0], use_tls=_use_tls, bind_dn=_bind_dn, bind_pass=_bind_pass, log_level=None)
    new_group_results = local_ldap.paged_search(base=group_search_base, sfilter=new_group_filter, attrlist=group_attribs, scope=scope)
    old_group_results = local_ldap.paged_search(base=group_search_base, sfilter=old_group_filter, attrlist=group_attribs, scope=scope)
    user_results = local_ldap.paged_search(base=user_search_base, sfilter=user_filter, attrlist=user_attribs, scope=scope)

    logger.debug("LDAP new group: %s", json.dumps(new_group_results))
    logger.debug("LDAP old group: %s", json.dumps(old_group_results))
    logger.debug("LDAP user: %s", json.dumps(user_results))

    if len(new_group_results) != 1 or len(old_group_results) != 1:
        logger.error("Incorrect number of LDAP group results returned")
        sys.exit(1)
    if len(user_results) != 1:
        logger.error("Incorrect number of LDAP user results returned")
        sys.exit(1)

    ldap_group_new = LdapGroup()
    ldap_group_new.setattrs(data=new_group_results[0], listvals=["uniqueMember"])
    ldap_group_old = LdapGroup()
    ldap_group_old.setattrs(data=old_group_results[0], listvals=["uniqueMember"])
    ldap_user = LdapUser()
    ldap_user.setattrs(data=user_results[0])

    # Not all LDAP groups have slurmAccountName attribute
    if not hasattr(ldap_group_new, 'slurmAccountName'):
        ldap_group_new.slurmAccountName = ldap_group_new.cn
    if not hasattr(ldap_group_old, 'slurmAccountName'):
        ldap_group_old.slurmAccountName = ldap_group_old.cn

    # Check certain things exist to avoid sending None to LDAP which could delete more than we want
    _ldap_group_new_valid = True
    _ldap_group_old_valid = True
    for a in ['dn', 'gidNumber', 'slurmAccountName', 'uniqueMember', 'cn']:
        if not hasattr(ldap_group_new, a):
            _ldap_group_new_valid = False
        elif getattr(ldap_group_new, a) is None:
            _ldap_group_new_valid = False
        if not hasattr(ldap_group_old, a):
            _ldap_group_old_valid = False
        elif getattr(ldap_group_old, a) is None:
            _ldap_group_old_valid = False
    if not _ldap_group_new_valid:
        logger.error("LDAP group %s does not have all necessary information", args.new_group)
    if not _ldap_group_old_valid:
        logger.error("LDAP group %s does not have all necessary information", args.old_group)
    if ldap_user.dn is None or ldap_user.uid is None or ldap_user.gidNumber is None:
        logger.error("LDAP user %s does not have all necessary information", args.username)

    ## Update account management database
    get_old_group_params = {
        "name": args.old_group,
    }
    old_group_data = actmgr_api.get_groups(_url, _json_headers, get_old_group_params)
    old_group = old_group_data[0]
    logger.debug("Old Group API data: %s", json.dumps(old_group))

    get_new_group_params = {
        "name": args.new_group,
    }
    new_group_data = actmgr_api.get_groups(_url, _json_headers, get_new_group_params)
    new_group = new_group_data[0]
    logger.debug("New Group API data: %s", json.dumps(new_group))

    get_account_params = {
        "username": args.username,
    }
    account_data = actmgr_api.get_accounts(_url, _json_headers, get_account_params)
    account = account_data[0]
    logger.debug("Account API data: %s", json.dumps(account))

    update_account_data = {
        "primary_group_id": new_group['id'],
    }
    # Handle cases where person is assigned old group in both primary and auxiliary groups
    group_ids = []
    for g in account["groups"]:
        if g["id"] == old_group["id"]:
            group_ids.append(new_group["id"])
        else:
            group_ids.append(g["id"])
    if group_ids:
        update_account_data['group_ids'] = group_ids

    account = actmgr_api.update_account(_url, _json_headers, account['id'], update_account_data)
    if not account or not account["account"]:
        logger.error("Failed to update account management data")
        sys.exit(1)
    logger.debug("Account updated API data: %s", json.dumps(account))
    account = account["account"]

    ## Update LDAP
    if ldap_user.gidNumber != ldap_group_new.gidNumber:
        logger.info("LDAP replace %s gidNumber=%s", ldap_user.dn, ldap_group_new.gidNumber)
        local_ldap.modify(ldap_user.dn, [(ldap.MOD_REPLACE, 'gidNumber', ldap_group_new.gidNumber)])
    else:
        logger.warn("Skipping LDAP update of user gidNumber - already updated")

    if ldap_user.dn not in ldap_group_new.uniqueMember:
        logger.info("LDAP add to %s uniqueMember=%s", ldap_group_new.dn, ldap_user.dn)
        local_ldap.modify(ldap_group_new.dn, [(ldap.MOD_ADD, "uniqueMember", ldap_user.dn)])
    else:
        logger.warn("Skipping LDAP update of group add uniqueMember - already updated")

    if ldap_user.dn in ldap_group_old.uniqueMember:
        logger.info("LDAP delete from %s uniqueMember=%s", ldap_group_old.dn, ldap_user.dn)
        local_ldap.modify(ldap_group_old.dn, [(ldap.MOD_DELETE, "uniqueMember", ldap_user.dn)])
    else:
        logger.warn("Skipping LDAP update of group delete uniqueMember - already updated")

    ## Update SLURM
    _slurm_account = account["primary_group"]["alias"]
    _slurm_accounts = [g["alias"] for g in account["groups"] if "alias" in g]
    if _slurm_account not in _slurm_accounts:
        _slurm_accounts.append(_slurm_account)

    if not _slurm_account or not _slurm_accounts:
        logger.error("SLURM accounts not correctly determined")
        sys.exit(1)

    sacctmgr_check_args = [
        "--parsable2", "--noheader", "show", "user",
        "name=%s" % args.username, "account=%s" % _slurm_account,
        "format=User,DefaultAccount,Account", "WithAssoc",
    ]
    logger.debug("Executing: sacctmgr %s", " ".join(sacctmgr_check_args))
    try:
        output = sacctmgr(sacctmgr_check_args)
    except ErrorReturnCode:
        logger.error("FAILED to check if SLURM account already exists.")
        sys.exit(1)
    expected_output = "%s|%s|%s" % (args.username, _slurm_account, _slurm_account)
    existing_slurm_accounts = output.split(os.linesep)
    if expected_output not in existing_slurm_accounts:
        sacctmgr_delete_args = ["-i", "delete", "user","where", "name=%s" % args.username, "account=%s" % ldap_group_old.slurmAccountName]
        logger.debug("Executing: sacctmgr %s", " ".join(sacctmgr_delete_args))
        try:
            output = sacctmgr(sacctmgr_delete_args)
        except ErrorReturnCode:
            logger.error("FAILED to delete user from SLURM.")
            sys.exit(1)

        sacctmgr_create_args = [
            "-i", "create", "user", args.username,
            "account=%s" % ",".join(_slurm_accounts),
            "defaultaccount=%s" % _slurm_account,
        ]
        logger.debug("Executing: sacctmgr %s", " ".join(sacctmgr_create_args))
        try:
            output = sacctmgr(sacctmgr_create_args)
        except ErrorReturnCode:
            logger.error("FAILED to retrieve all user names from SLURM.")
            sys.exit(1)
    else:
        logger.warn("Skipping SLURM account modifications - record already exists")

    ## Update permissions of $HOME and $SCRATCH
    home_path = os.path.join(_account_home_config.get("base_dir"), args.username)
    scratch_path = os.path.join(_account_home_config.get("scratch_base"), args.username)
    find_home_args = [
        home_path, "-group", args.old_group, "-exec", "chgrp", args.new_group, '{}', ';'
    ]
    logger.info("Changing group ownership of files under %s", home_path)
    logger.debug("Executing: find %s", " ".join(find_home_args))
    try:
        find(find_home_args)
    except ErrorReturnCode as e:
        logger.error("Failed to fix permissions of %s: %s", home_path, e.stderr)
        sys.exit(1)
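A hedged pure-Python equivalent of the find ... -exec chgrp pipeline above (the script itself shells out to find instead):

import grp
import os

def chgrp_tree(root, old_group, new_group):
    old_gid = grp.getgrnam(old_group).gr_gid
    new_gid = grp.getgrnam(new_group).gr_gid
    for dirpath, dirnames, filenames in os.walk(root):
        for path in [dirpath] + [os.path.join(dirpath, f) for f in filenames]:
            if os.lstat(path).st_gid == old_gid:
                os.lchown(path, -1, new_gid)  # -1 leaves the owner unchanged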
Example #19
def main():
    cfg = load_config(CONFIG)

    # distributed settings
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--ngpu', type=int, default=4)
    args = parser.parse_args()

    if cfg['training']['data_parallel'] == 'distributed':
        distributed = True
        local_rank = args.local_rank
        torch.cuda.set_device(local_rank)
        world_size = args.ngpu
        torch.distributed.init_process_group('nccl', init_method="tcp://localhost:16534", world_size=world_size, rank=local_rank)
    else:
        distributed = False
        local_rank = None
        world_size = None

    # settings
    bs_train, bs_test, n_workers = cfg['training']['batch_size_train'], cfg['training']['batch_size_test'], cfg['training']['n_workers']
    n_epochs = cfg['training']['n_epochs']
    transforms_train, transforms_test = load_transforms(cfg)

    # data
    ds_train = E32Dataset(cfg, cfg['paths']['data_train'], 'train', transforms=transforms_train)
    ds_test = E32Dataset(cfg, cfg['paths']['data_test'], 'test', transforms=transforms_test)
    sampler_train = DistributedSampler(ds_train, num_replicas=world_size, rank=local_rank) if distributed else None
    sampler_test = DistributedSampler(ds_test, num_replicas=world_size, rank=local_rank) if distributed else None
    dl_train = DataLoader(ds_train, bs_train, shuffle=False if distributed else True, num_workers=n_workers, pin_memory=False, sampler=sampler_train)
    dl_test = DataLoader(ds_test, bs_test, shuffle=False, num_workers=n_workers, pin_memory=False, sampler=sampler_test)

    # model
    model, starting_epoch, state = load_model(cfg, local_rank)
    optimizer, scheduler = load_optimizer(model, cfg, state, steps_per_epoch=(len(dl_train)))
    train_criterion, test_criterion = load_criterion(cfg)

    # WandB
    if not local_rank:
        wandb.init(project="a4c3d", config=cfg, notes=cfg.get("description", None))
        wandb.save("*.mp4")  # Write MP4 files immediately to WandB
        wandb.watch(model)

    # training
    best_loss, best_path, last_save_path = 1e10, None, None

    for epoch in range(starting_epoch, n_epochs + 1):
        if not local_rank:
            print(f"\nEpoch {epoch} of {n_epochs}")

        # Cycle
        train_loss = cycle('train', model, dl_train, epoch, train_criterion, optimizer, cfg, scheduler, local_rank=local_rank)
        test_loss = cycle('test', model, dl_test, epoch, test_criterion, optimizer, cfg, scheduler, local_rank=local_rank)

        # Save state if required
        if not local_rank:
            model_weights = model.module.state_dict() if cfg['training']['data_parallel'] else model.state_dict()
            state = {'epoch': epoch + 1,
                     'model': model_weights,
                     'optimizer': optimizer.state_dict(),
                     'scheduler': scheduler}
            save_name = f"{epoch}_{test_loss:.05f}.pt"
            best_loss, last_save_path = save_state(state, save_name, test_loss, best_loss, cfg, last_save_path, lowest_best=True)

            # Vis seg
            vis_mse(ds_test, model, epoch, cfg)

    if not local_rank:
        save_name = f"FINAL_{epoch}_{test_loss:.05f}.pt"
        save_state(state, save_name, test_loss, best_loss, cfg, last_save_path, force=True)
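The --local_rank flag above is what PyTorch's (legacy) launcher passes to each worker; a hedged launch line for the four-GPU case implied by --ngpu (train.py is an assumed script name):

# python -m torch.distributed.launch --nproc_per_node=4 train.py --ngpu 4
#
# Each worker pins its GPU with torch.cuda.set_device(local_rank) and joins
# the process group at the init_method TCP address, as the snippet does.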
Example #20
    def reload(self, signum=None, frame=None):
        config.load_config()
        sockets.spawn_all()
        events.unregister_all()
        modules.load_all()
Example #21
def main():
    args = parser.parse_args()

    config_dic = load_config(args.config_path)
    configs = Struct(**config_dic)
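    # (Hedged aside: Struct is presumably the classic dict-to-attribute
    # wrapper, roughly
    #
    #     class Struct:
    #         def __init__(self, **entries):
    #             self.__dict__.update(entries)
    #
    # so configs.batch_size and friends below read from config_dic.)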

    # assert (torch.cuda.is_available())  # assume CUDA is always available

    print('configurations:', configs)

    # torch.cuda.set_device(configs.gpu_id)
    # torch.manual_seed(configs.seed)
    # torch.cuda.manual_seed(configs.seed)
    np.random.seed(configs.seed)
    random.seed(configs.seed)
    # torch.backends.cudnn.benchmark = False
    # torch.backends.cudnn.deterministic = True

    configs.exp_dir = 'results/' + configs.data_folder + '/' + configs.exp_dir_name
    exp_dir = configs.exp_dir

    os.makedirs(configs.exp_dir, exist_ok=True)
    os.makedirs(osp.join(configs.exp_dir, 'samples'), exist_ok=True)
    os.makedirs(osp.join(configs.exp_dir, 'checkpoints'), exist_ok=True)

    # loaders
    if 'CLEVR' in configs.data_folder:
        # we need the module's label->index dictionary from train loader
        train_loader = CLEVRTREE(phase='train',
                                 base_dir=osp.join(configs.base_dir,
                                                   configs.data_folder),
                                 batch_size=configs.batch_size,
                                 random_seed=configs.seed,
                                 shuffle=True)
        test_loader = CLEVRTREE(phase='test',
                                base_dir=osp.join(configs.base_dir,
                                                  configs.data_folder),
                                batch_size=configs.batch_size,
                                random_seed=configs.seed,
                                shuffle=False)
        gen_loader = CLEVRTREE(phase='test',
                               base_dir=osp.join(configs.base_dir,
                                                 configs.data_folder),
                               batch_size=configs.batch_size,
                               random_seed=configs.seed,
                               shuffle=False)
    elif 'COLORMNIST' in configs.data_folder:
        train_loader = COLORMNISTTREE(phase='train',
                                      directory=configs.base_dir,
                                      folder=configs.data_folder,
                                      batch_size=configs.batch_size,
                                      random_seed=configs.seed,
                                      shuffle=True)
        test_loader = CLEVRTREE(phase='test',
                                base_dir=osp.join(configs.base_dir,
                                                  configs.data_folder),
                                batch_size=configs.batch_size,
                                random_seed=configs.seed,
                                shuffle=False)
        gen_loader = COLORMNISTTREE(phase='test',
                                    directory=configs.base_dir,
                                    folder=configs.data_folder,
                                    batch_size=configs.batch_size,
                                    random_seed=configs.seed,
                                    shuffle=False)
    else:
        raise ValueError('invalid dataset folder name {}'.format(
            configs.data_folder))

    # hack, parameter
    im_size = int(gen_loader.im_size[2])

    # model
    model = PNPNet(
        hiddim=configs.hiddim,
        latentdim=configs.latentdim,
        word_size=[configs.latentdim, configs.word_size, configs.word_size],
        pos_size=[8, 1, 1],
        nres=configs.nr_resnet,
        nlayers=4,
        nonlinear='elu',
        dictionary=train_loader.dictionary,
        op=[configs.combine_op, configs.describe_op],
        lmap_size=im_size // 2**configs.ds,
        downsample=configs.ds,
        lambdakl=-1,
        bg_bias=configs.bg_bias,
        normalize=configs.normalize,
        loss=configs.loss,
        debug_mode=False,
        batch_size=configs.batch_size)

    # if configs.checkpoint is not None and len(configs.checkpoint) > 0:
    #     model.load_state_dict(torch.load(configs.checkpoint))
    #     print('load model from {}'.format(configs.checkpoint))
    # else:
    #     model.apply(weights_init)
    # dummy_data = tf.zeros([1,64,64,3])
    # fnames = ["./data/CLEVR/CLEVR_64_MULTI_LARGE/trees/train/CLEVR_new_000002.tree"]
    # dummy_tree = [pickle.load(open(fnames[0],"rb"))]
    # rec_loss, kld_loss, pos_loss, modelout = model(dummy_data,dummy_tree,fnames)
    # model.trainable_variables.sort(key=lambda x: x.name, reverse=True)
    # print([i.name for i in model.trainable_variables],len(model.trainable_variables))
    # all_trainable_variables = model.trainable_variables
    # configs.mode = "done"
    if configs.mode == 'train':
        train(model, train_loader, test_loader, gen_loader, configs=configs)
    elif configs.mode == 'test':
        print(exp_dir)
        print('Start generating...')
        generate(model,
                 gen_loader=gen_loader,
                 num_sample=2,
                 target_dir=exp_dir)
    elif configs.mode == 'visualize':
        print(exp_dir)
        print('Start visualizing...')
        visualize(model, num_sample=50, target_dir=exp_dir)
    elif configs.mode == 'sample':
        print('Sampling')
        sample_tree(model,
                    test_loader=test_loader,
                    tree_idx=4,
                    base_dir='samples',
                    num_sample=500)
    else:
        raise ValueError('Wrong mode given:{}'.format(configs.mode))
Example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', help="User's username", required=True)
    parser.add_argument('--new-group',
                        help="New group to assign",
                        required=True)
    parser.add_argument('--old-group', help="Old group", required=True)
    parser.add_argument('--config-env',
                        help="config environment",
                        dest="config_env",
                        default="production")
    parser.add_argument('--debug',
                        help="set debug level (0-4)",
                        dest="debug",
                        nargs="?",
                        const=0,
                        type=int)
    args = parser.parse_args()
    options = vars(args)

    config = load_config()
    config_env = config[args.config_env]["ldap"]
    _account_home_config = config[args.config_env].get("account_home")
    _auth_token = config[args.config_env].get("api_auth_token")
    _host = config[args.config_env].get("host")
    _port = config[args.config_env].get("port")
    _https = config[args.config_env].get("https")
    _protocol = 'https' if _https else 'http'
    _url = "%s://%s:%s/" % (
        _protocol, _host, _port) if _port else "%s://%s/" % (_protocol, _host)
    _json_headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": "Token token=%s" % _auth_token,
    }

    # Setup logging
    setup_logging(debug=args.debug, noop=False)

    logger.debug4("OPTIONS: %s" % options)
    logger.debug4("CONFIG: %s" % config_env)

    _ldap_url = config_env.get("url")
    _use_tls = config_env.get("tls")
    _bind_dn = config_env.get("bind_dn", None)
    _bind_pass = config_env.get("bind_pass", None)

    group_search_base = "ou=Groups,dc=brazos,dc=tamu,dc=edu"
    user_search_base = "ou=People,dc=brazos,dc=tamu,dc=edu"
    new_group_filter = "cn=%s" % args.new_group
    old_group_filter = "cn=%s" % args.old_group
    user_filter = "uid=%s" % args.username
    group_attribs = [
        "dn",
        "cn",
        "gidNumber",
        "uniqueMember",
        "slurmAccountName",
    ]
    user_attribs = [
        "dn",
        "uid",
        "gidNumber",
    ]
    scope = "one"

    local_ldap = LocalLdap(url=_ldap_url[0],
                           use_tls=_use_tls,
                           bind_dn=_bind_dn,
                           bind_pass=_bind_pass,
                           log_level=None)
    new_group_results = local_ldap.paged_search(base=group_search_base,
                                                sfilter=new_group_filter,
                                                attrlist=group_attribs,
                                                scope=scope)
    old_group_results = local_ldap.paged_search(base=group_search_base,
                                                sfilter=old_group_filter,
                                                attrlist=group_attribs,
                                                scope=scope)
    user_results = local_ldap.paged_search(base=user_search_base,
                                           sfilter=user_filter,
                                           attrlist=user_attribs,
                                           scope=scope)

    logger.debug("LDAP new group: %s", json.dumps(new_group_results))
    logger.debug("LDAP old group: %s", json.dumps(old_group_results))
    logger.debug("LDAP user: %s", json.dumps(user_results))

    if len(new_group_results) != 1 or len(old_group_results) != 1:
        logger.error("Incorrect number of LDAP group results returned")
        sys.exit(1)
    if len(user_results) != 1:
        logger.error("Incorrect number of LDAP user results returned")
        sys.exit(1)

    ldap_group_new = LdapGroup()
    ldap_group_new.setattrs(data=new_group_results[0],
                            listvals=["uniqueMember"])
    ldap_group_old = LdapGroup()
    ldap_group_old.setattrs(data=old_group_results[0],
                            listvals=["uniqueMember"])
    ldap_user = LdapUser()
    ldap_user.setattrs(data=user_results[0])

    # Not all LDAP groups have slurmAccountName attribute
    if not hasattr(ldap_group_new, 'slurmAccountName'):
        ldap_group_new.slurmAccountName = ldap_group_new.cn
    if not hasattr(ldap_group_old, 'slurmAccountName'):
        ldap_group_old.slurmAccountName = ldap_group_old.cn

    # Ensure required attributes exist so we never send None to LDAP,
    # which could delete more than intended
    _ldap_group_new_valid = True
    _ldap_group_old_valid = True
    for a in ['dn', 'gidNumber', 'slurmAccountName', 'uniqueMember', 'cn']:
        if getattr(ldap_group_new, a, None) is None:
            _ldap_group_new_valid = False
        if getattr(ldap_group_old, a, None) is None:
            _ldap_group_old_valid = False
    _ldap_user_valid = not (ldap_user.dn is None or ldap_user.uid is None
                            or ldap_user.gidNumber is None)
    if not _ldap_group_new_valid:
        logger.error("LDAP group %s does not have all necessary information",
                     args.new_group)
    if not _ldap_group_old_valid:
        logger.error("LDAP group %s does not have all necessary information",
                     args.old_group)
    if not _ldap_user_valid:
        logger.error("LDAP user %s does not have all necessary information",
                     args.username)
    if not (_ldap_group_new_valid and _ldap_group_old_valid
            and _ldap_user_valid):
        sys.exit(1)

    ## Update account management database
    get_old_group_params = {
        "name": args.old_group,
    }
    old_group_data = actmgr_api.get_groups(_url, _json_headers,
                                           get_old_group_params)
    old_group = old_group_data[0]
    logger.debug("Old Group API data: %s", json.dumps(old_group))

    get_new_group_params = {
        "name": args.new_group,
    }
    new_group_data = actmgr_api.get_groups(_url, _json_headers,
                                           get_new_group_params)
    new_group = new_group_data[0]
    logger.debug("New Group API data: %s", json.dumps(new_group))

    get_account_params = {
        "username": args.username,
    }
    account_data = actmgr_api.get_accounts(_url, _json_headers,
                                           get_account_params)
    account = account_data[0]
    logger.debug("Account API data: %s", json.dumps(account))

    update_account_data = {
        "primary_group_id": new_group['id'],
    }
    # Handle cases where person is assigned old group in both primary and auxiliary groups
    group_ids = []
    for g in account["groups"]:
        if g["id"] == old_group["id"]:
            group_ids.append(new_group["id"])
        else:
            group_ids.append(g["id"])
    if group_ids:
        update_account_data['group_ids'] = group_ids

    account = actmgr_api.update_account(_url, _json_headers, account['id'],
                                        update_account_data)
    if not account or not account["account"]:
        logger.error("Failed to update account management data")
        sys.exit(1)
    logger.debug("Account updated API data: %s", json.dumps(account))
    account = account["account"]

    ## Update LDAP
    if ldap_user.gidNumber != ldap_group_new.gidNumber:
        logger.info("LDAP replace %s gidNumber=%s", ldap_user.dn,
                    ldap_group_new.gidNumber)
        local_ldap.modify(
            ldap_user.dn,
            [(ldap.MOD_REPLACE, 'gidNumber', ldap_group_new.gidNumber)])
    else:
        logger.warning("Skipping LDAP update of user gidNumber - already updated")

    if ldap_user.dn not in ldap_group_new.uniqueMember:
        logger.info("LDAP add to %s uniqueMember=%s", ldap_group_new.dn,
                    ldap_user.dn)
        local_ldap.modify(ldap_group_new.dn,
                          [(ldap.MOD_ADD, "uniqueMember", ldap_user.dn)])
    else:
        logger.warning(
            "Skipping LDAP update of group add uniqueMember - already updated")

    if ldap_user.dn in ldap_group_old.uniqueMember:
        logger.info("LDAP delete from %s uniqueMember=%s", ldap_group_old.dn,
                    ldap_user.dn)
        local_ldap.modify(ldap_group_old.dn,
                          [(ldap.MOD_DELETE, "uniqueMember", ldap_user.dn)])
    else:
        logger.warning(
            "Skipping LDAP update of group delete uniqueMember - already updated"
        )

    ## Update SLURM
    _slurm_account = account["primary_group"]["alias"]
    _slurm_accounts = [g["alias"] for g in account["groups"] if "alias" in g]
    if _slurm_account not in _slurm_accounts:
        _slurm_accounts.append(_slurm_account)

    if not _slurm_account or not _slurm_accounts:
        logger.error("SLURM accounts not correctly determined")
        sys.exit(1)

    sacctmgr_check_args = [
        "--parsable2",
        "--noheader",
        "show",
        "user",
        "name=%s" % args.username,
        "account=%s" % _slurm_account,
        "format=User,DefaultAccount,Account",
        "WithAssoc",
    ]
    logger.debug("Executing: sacctmgr %s", " ".join(sacctmgr_check_args))
    try:
        output = sacctmgr(sacctmgr_check_args)
    except ErrorReturnCode:
        logger.error("FAILED to check if SLURM account already exists.")
        sys.exit(1)
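    # --parsable2 gives pipe-delimited rows of User|DefaultAccount|Account,
    # e.g. "jdoe|newacct|newacct" (names illustrative) when the user already
    # belongs to the new account and it is the default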
    expected_output = "%s|%s|%s" % (args.username, _slurm_account,
                                    _slurm_account)
    existing_slurm_accounts = output.splitlines()
    if expected_output not in existing_slurm_accounts:
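        # no association with the new account yet: drop the old association,
        # then recreate the user with the full account list and new default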
        sacctmgr_delete_args = [
            "-i", "delete", "user", "where",
            "name=%s" % args.username,
            "account=%s" % ldap_group_old.slurmAccountName
        ]
        logger.debug("Executing: sacctmgr %s", " ".join(sacctmgr_delete_args))
        try:
            output = sacctmgr(sacctmgr_delete_args)
        except ErrorReturnCode:
            logger.error("FAILED to delete user from SLURM.")
            sys.exit(1)

        sacctmgr_create_args = [
            "-i",
            "create",
            "user",
            args.username,
            "account=%s" % ",".join(_slurm_accounts),
            "defaultaccount=%s" % _slurm_account,
        ]
        logger.debug("Executing: sacctmgr %s", " ".join(sacctmgr_create_args))
        try:
            output = sacctmgr(sacctmgr_create_args)
        except ErrorReturnCode:
            logger.error("FAILED to create user in SLURM.")
            sys.exit(1)
    else:
        logger.warning(
            "Skipping SLURM account modifications - record already exists")

    ## Update permissions of $HOME and $SCRATCH
    home_path = os.path.join(_account_home_config.get("base_dir"),
                             args.username)
    scratch_path = os.path.join(_account_home_config.get("scratch_base"),
                                args.username)
    find_home_args = [
        home_path, "-group", args.old_group, "-exec", "chgrp", args.new_group,
        '{}', ';'
    ]
    logger.info("Changing group ownership of files under %s", home_path)
    logger.debug("Executing: find %s", " ".join(find_home_args))
    try:
        find(find_home_args)
    except ErrorReturnCode as e:
        logger.error("Failed to fix permissions of %s: %s", home_path,
                     e.stderr)
        sys.exit(1)
Exemple #23
0
# -*- coding: utf-8 -*-
"""The database methods
"""

from __future__ import unicode_literals

import pymongo
import gridfs

from lib.config import load_config

CONFIG = load_config()

MONGO_URI = CONFIG.get('MONGO_URI', 'mongodb://localhost:27017')
MONGO_DATABASE = 'randomery'
MONGO_DATA_COLLECTION = 'desktopdata'
MONGO_MOBILE_DATA_COLLECTION = 'mobiledata'
MONGO_USERS_COLLECTION = 'users'
MONGO_POOL_COLLECTION = 'pool'
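
# a minimal sketch of how the gridfs import above could be wired up; the
# helper name is hypothetical, and db_connect() is defined just below
def get_gridfs():
    """Return a GridFS handle on the configured database."""
    return gridfs.GridFS(db_connect()[MONGO_DATABASE])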


def db_connect():
    """
        Connect to the mongo host.

        This begin a new connection with a mongo uri. The mongo uri is defined
        through the MONGO_URI variable.

        :return: A mongo connection
        :rtype: MongoClient
    """
    return pymongo.MongoClient(MONGO_URI)
Exemple #24
0
import argparse
import os

import torch

from lib import config as cnf
# `data` is assumed to live alongside lib.config in this repo
from lib import data

config = cnf.load_config('config.yaml')

if __name__ == '__main__':
    # (argument definitions reconstructed from the names used below)
    parser = argparse.ArgumentParser()
    parser.add_argument('config', type=str, help='Path to a config file.')
    parser.add_argument('--experiment',
                        type=str,
                        default='temporal',
                        help="One of 'temporal', 'spatial' or 'future'.")
    parser.add_argument('--seq_length',
                        type=int,
                        default=30,
                        help='Length of the sub-sequence, '
                        'we set it to 30 for 4D completion and 20 for future prediction.')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        help='Do not use cuda.')
    parser.add_argument('--g', type=str, default='0', help='gpu id')
    args = parser.parse_args()

    assert args.experiment in ['temporal', 'spatial', 'future']
    if args.experiment == 'future':
        args.seq_length = 20

    os.environ['CUDA_VISIBLE_DEVICES'] = args.g

    cfg = cnf.load_config(args.config, 'configs/default.yaml')
    is_cuda = (torch.cuda.is_available() and not args.no_cuda)
    device = torch.device("cuda" if is_cuda else "cpu")

    transf_pt = data.SubsamplePointsSeq(
        cfg['data']['n_training_points'],
        random=True,
        spatial_completion=(args.experiment == 'spatial'))
    fields = {
        'points':
        data.PointsSubseqField(cfg['data']['points_iou_seq_folder'],
                               all_steps=True,
                               seq_len=args.seq_length,
                               unpackbits=cfg['data']['points_unpackbits'],
                               transform=transf_pt,
                               scale_type=cfg['data']['scale_type'],
Exemple #26
0
        net, end_points = resnet_v2(inputs,
                                    blocks,
                                    is_training=is_training,
                                    global_pool=False,
                                    include_root_block=True,
                                    scope=self._scope)
        return net, end_points


if __name__ == '__main__':
    import sys

    from lib.tf_utils import print_endpoints
    from lib.config import load_config

    cfg = load_config()

    img_file = '/home/cwq/data/MLT2017/val/img_773.jpg'
    # img_file = '/home/cwq/ssd_data/more_bg_corpus/val/00000000.jpg'

    cfg.lr_boundaries = [1]
    cfg.lr_values = [1, 2]

    res_net = ResNetV2(cfg, 62)
    res_net.create_architecture()
    print_endpoints(res_net, img_file)
    for k, v in res_net.end_points.items():
        stride = int(641 / v.shape.as_list()[2])
        print("%s, stride %d, shape %s" % (k, stride, v.shape.as_list()))
Exemple #27
0
import os
import sys
import base64
import ConfigParser
import requests
import json
import logging
import re
from urlparse import urljoin
import prettytable

BASE_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
from lib.config import load_config
from lib.logs import setup_logging

configs = load_config()
config = configs['production']['pulp']

hostname = config.get("hostname", "localhost")
username = config.get("username", "admin")
password = config.get("password", "password")

logger = logging.getLogger()
setup_logging()

auth_str = "%s:%s" % (username, password)
auth_encoded = base64.b64encode(auth_str)
auth = "Basic %s" % auth_encoded
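# the same header could be produced with requests' built-in basic auth,
# e.g. requests.get(url, auth=(username, password))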

rest_headers = {
    'Content-Type': 'application/json',
Exemple #28
0
def main():
    globals.DOH = DOH()

    curpath = os.path.dirname(__file__)
    if not curpath:
        curpath = '.'

    parser = argparse.ArgumentParser()
    parser.add_argument('-f',
                        action='store',
                        dest='conffile',
                        help='Specifies config file',
                        default='%s/config.yaml' % curpath,
                        type=str)

    parser.add_argument(
        '-d',
        action='store_true',
        dest='daemon',
        help='Fork process into background (not available on Windows)',
        default=False)

    args = parser.parse_args()

    # load config
    globals.config = cnf.load_config(args.conffile)

    if not hasattr(globals.config.default, "doh_urls") or not isinstance(
            globals.config.default.doh_urls, list):
        print("ERROR: No doh_urls defined in default config!")
        sys.exit(2)

    # fork into background
    if os.name == 'posix' and args.daemon:
        try:
            p = os.fork()
        except OSError:
            # os.fork() raises OSError on failure; it never returns -1
            print("ERROR: Couldn't fork()!")
            sys.exit(1)
        if p > 0:
            sys.exit(0)

    sock_server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        sock_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    except AttributeError:
        pass  # Some systems don't support SO_REUSEPORT

    try:
        sock_server.bind((globals.config.service.get('listen_address',
                                                     '0.0.0.0'),
                          globals.config.service.get('listen_port', 53)))
        print("Listening on:",
              globals.config.service.get('listen_address', '0.0.0.0'),
              globals.config.service.get('listen_port', 53))
    except Exception as ex:
        print("Couldn't listen on %s:%s\n%s" %
              (globals.config.service.get('listen_address', '0.0.0.0'),
               globals.config.service.get('listen_port', 53), str(ex)))
        sys.exit(1)

    drop_privs()

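    # serve forever: each inbound DNS datagram gets its own daemon thread,
    # so a slow upstream DoH request never blocks the receive loop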
    while True:
        msg, client = sock_server.recvfrom(1024)

        newthread = UDPThread(client, msg, sock_server)
        newthread.daemon = True
        newthread.start()
Exemple #29
0
    # handle result
    def return_true(self, payload, data):
        self.result.append("Payload : %s\nParams : %s" % (payload, data))

    def vul_verify(self, test, location):
        """Verify every payload for the given injection location in parallel."""
        threads = []
        for i in self.payloads[location]:
            threads.append(
                Thread(target=self.single_payload_verify,
                       args=(test, location, i)))
        for i in threads:
            i.start()
        for i in threads:
            i.join()


if __name__ == "__main__":
    from lib.redisopt import redisCli
    from lib.config import load_config
    from lib.scanner.request import Request
    load_config()
    redisCli.build_connection()
    r = Request(redisCli.retrieve_request(b"7554697de81997581ca6e5bcfc850cd6"))
    xss_scanner = XssScan(r)
    result = xss_scanner.scan()
    print(result)

def run():
    import time
    x = tf.random.uniform([1, 64, 64, 3])
    s = time.time()
    rec_loss, kld_loss, pos_loss, modelout = model(
        x,
        trees,
        None,
        alpha=configs.alpha_ub,
        ifmask=True,
        maskweight=configs.maskweight)
    print(time.time() - s)


if __name__ == "__main__":
    config_dic = load_config("./configs/pnp_net_configs.yaml")
    configs = Struct(**config_dic)
    # hiddim=configs.hiddim
    # latentdim=configs.latentdim
    # word_size=[configs.word_size, configs.word_size,configs.latentdim]
    # pos_size=[1, 1,8]
    # nres=configs.nr_resnet
    # nlayers=4
    # nonlinear='elu'
    dictionary = [
        'brown', 'cylinder', 'cube', 'left-front', 'yellow', 'sphere', 'right',
        'right-front', 'right-behind', 'cyan', 'blue', 'gray', 'rubber',
        'purple', 'metal', 'left-behind', 'green', 'red', 'left', 'small',
        'large'
    ]
    # dictionary=train_loader.dictionary
Exemple #31
0
def main():
    args = parser.parse_args()

    config_dic = load_config(args.config_path)
    configs = Struct(**config_dic)

    assert (torch.cuda.is_available())  # assume CUDA is always available

    print('configurations:', configs)

    torch.cuda.set_device(configs.gpu_id)
    torch.manual_seed(configs.seed)
    torch.cuda.manual_seed(configs.seed)
    np.random.seed(configs.seed)
    random.seed(configs.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    configs.exp_dir = osp.join(configs.project_dir, 'results',
                               configs.data_folder, configs.exp_dir_name)
    exp_dir = configs.exp_dir

    os.makedirs(configs.exp_dir, exist_ok=True)
    os.makedirs(osp.join(configs.exp_dir, 'samples'), exist_ok=True)
    os.makedirs(osp.join(configs.exp_dir, 'checkpoints'), exist_ok=True)

    # loaders
    if 'CLEVR' in configs.data_folder:
        # we need the module's label->index dictionary from train loader
        train_loader = CLEVRTREE(phase='train',
                                 base_dir=osp.join(configs.base_dir,
                                                   configs.data_folder),
                                 batch_size=configs.batch_size,
                                 random_seed=configs.seed,
                                 shuffle=True)
        test_loader = CLEVRTREE(phase='test',
                                base_dir=osp.join(configs.base_dir,
                                                  configs.data_folder),
                                batch_size=configs.batch_size,
                                random_seed=configs.seed,
                                shuffle=False)
        gen_loader = CLEVRTREE(phase='test',
                               base_dir=osp.join(configs.base_dir,
                                                 configs.data_folder),
                               batch_size=configs.batch_size,
                               random_seed=configs.seed,
                               shuffle=False)
    elif 'COLORMNIST' in configs.data_folder:
        train_loader = COLORMNISTTREE(phase='train',
                                      directory=configs.base_dir,
                                      folder=configs.data_folder,
                                      batch_size=configs.batch_size,
                                      random_seed=configs.seed,
                                      shuffle=True)
        test_loader = COLORMNISTTREE(phase='test',
                                     directory=configs.base_dir,
                                     folder=configs.data_folder,
                                     batch_size=configs.batch_size,
                                     random_seed=configs.seed,
                                     shuffle=False)
        gen_loader = COLORMNISTTREE(phase='test',
                                    directory=configs.base_dir,
                                    folder=configs.data_folder,
                                    batch_size=configs.batch_size,
                                    random_seed=configs.seed,
                                    shuffle=False)
    else:
        raise ValueError('invalid dataset folder name {}'.format(
            configs.data_folder))

    # hack: read the generated image's spatial size from the loader
    im_size = gen_loader.im_size[2]

    # model
    model = PNPNet(
        hiddim=configs.hiddim,
        latentdim=configs.latentdim,
        word_size=[configs.latentdim, configs.word_size, configs.word_size],
        pos_size=[8, 1, 1],
        nres=configs.nr_resnet,
        nlayers=4,
        nonlinear='elu',
        dictionary=train_loader.dictionary,
        op=[configs.combine_op, configs.describe_op],
        lmap_size=im_size // 2**configs.ds,
        downsample=configs.ds,
        lambdakl=-1,
        bg_bias=configs.bg_bias,
        normalize=configs.normalize,
        loss=configs.loss,
        debug_mode=False)

    if configs.checkpoint is not None and len(configs.checkpoint) > 0:
        model.load_state_dict(torch.load(configs.checkpoint))
        print('load model from {}'.format(configs.checkpoint))
    else:
        model.apply(weights_init)

    if configs.mode == 'train':
        train(model, train_loader, test_loader, gen_loader, configs=configs)
    elif configs.mode == 'single_test':
        print(exp_dir)
        print('Start generating...')
        generate_single(model,
                        gen_loader=gen_loader,
                        num_sample=configs.num_samples,
                        target_dir=exp_dir)
    elif configs.mode == 'test':
        print(exp_dir)
        print('Start generating...')
        generate(model,
                 gen_loader=gen_loader,
                 num_sample=configs.num_samples,
                 target_dir=exp_dir)
    elif configs.mode == 'visualize':
        print(exp_dir)
        print('Start visualizing...')
        visualize(model, num_sample=50, target_dir=exp_dir)
    elif configs.mode == 'sample':
        print('Sampling')
        if configs.all_combinations:
            sample_all_combinations_tree(model,
                                         test_loader=gen_loader,
                                         tree_idx=configs.tree_idx,
                                         base_dir=exp_dir,
                                         num_sample=configs.num_samples)
        else:
            sample_tree(model,
                        test_loader=gen_loader,
                        tree_idx=configs.tree_idx,
                        base_dir=exp_dir,
                        num_sample=configs.num_samples)
    else:
        raise ValueError('Wrong mode given: {}'.format(configs.mode))
Exemple #32
0
"""
Main code for the preprocess_data node
"""

import os

from lib.config import load_config
from processes.preprocess_data.process_house_data import process_house_data

PATH_CONFIG = os.getenv("PATH_CONFIG")

if __name__ == "__main__":

    config = load_config(PATH_CONFIG)
    process_house_data(config=config)
Exemple #33
0
import os
import shutil
import multiprocessing

from tqdm import tqdm
from itertools import repeat
from urllib.request import Request, urlopen
from urllib.error import HTTPError

from lib.config import load_config

CONFIG = "./experiments/009.yaml"
MAX_FRAMES = 10000
HEADERS = {'User-Agent': 'Mozilla/5.0'}
FORCE_REDOWNLOAD = False

cfg = load_config(CONFIG)
remote_path = cfg['paths']['remote_path']


def download_case(case, data_path):
    case = case.strip()  # Get rid of linebreaks etc.
    if '.png' in case:  # Remove frame number and file extension if present
        case = case.rsplit('-', 1)[0]
    subdir1 = case.split('-', 1)[0]
    subdir2, subdir3 = case[3:5], case[5:7]

    case_folder = os.path.join(data_path, case)
    if not os.path.exists(case_folder):
        exists = False
        os.makedirs(case_folder)
    else:
Exemple #34
0
def init():
    version_check()
    check_requirements()
    load_config()
    redis_connection_check()
    poc_reset()
def main():
    args = parse_args()
    config = load_config()
    config_env = config[args.config_env]["ldap"]

    # Setup logging
    setup_logging(debug=args.debug, noop=False)

    logger.debug4("OPTIONS: %s", vars(args))
    logger.debug4("CONFIG: %s", config_env)

    _ldap_url = config_env.get("url")
    _use_tls = config_env.get("tls")
    _bind_dn = config_env.get("bind_dn", None)
    _bind_pass = config_env.get("bind_pass", None)

    local_ldap = LocalLdap(url=_ldap_url[0],
                           use_tls=_use_tls,
                           bind_dn=_bind_dn,
                           bind_pass=_bind_pass,
                           log_level=None)
    ldap_users = local_ldap.paged_search(base=search_base,
                                         sfilter=search_filter,
                                         attrlist=search_return_attribs,
                                         scope=search_scope)

    users_over_quota = []
    users_over_ldap_quota = []
    users_over_zfs_quota = []
    users_ldap_quota_mismatch = []
    zfs_set_cmds = []

    for user in ldap_users:
        _user_data = {}
        _user = LdapUser()
        _user.setattrs(data=user, listvals=["mail"])
        _username = _user.uid
        _uid = _user.uidNumber
        _shell = _user.loginShell
        _quota = _user.quota
        if hasattr(_user, "mail"):
            _mail = ",".join(_user.mail)
        else:
            _mail = ""

        if active_only and _shell != "/bin/bash":
            continue

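        # the LDAP quota attribute packs a mount point plus four limits, e.g.
        # "/home/user1:10485760,20971520,0,0" -> soft/hard block limits in
        # KiB, then soft/hard inode limits (example value is illustrative)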
        mount, softlimit, hardlimit, softinode, hardinode = re.findall(
            r"^(.*):([0-9]+),([0-9]+),([0-9]+),([0-9]+)$", _quota)[0]
        _ldap_quota = int(hardlimit) * 1024
        zfs_fs = "tank%s" % mount

        # Get current ZFS quota
        userquota_args = ["get", "-H", "-p", "-o", "value", "userquota@%s" % _username, zfs_fs]
        logger.debug("Executing: zfs %s", " ".join(userquota_args))
        userquota_output = zfs(userquota_args)
        _userquota = userquota_output.strip()
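        # `zfs get` prints "-" when no userquota@ property has been set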
        if _userquota != "-":
            current_quota = int(_userquota)
        else:
            current_quota = 0

        # Get current used space
        userused_args = ["get", "-H", "-p", "-o", "value", "userused@%s" % _username, zfs_fs]
        logger.debug("Executing: zfs %s", " ".join(userused_args))
        userused_output = zfs(userused_args)
        _userused = userused_output.strip()
        if _userused != "-":
            current_used = int(_userused)
        else:
            current_used = 0

        _user_data["username"] = _username
        _user_data["uid"] = _uid
        _user_data["mail"] = _mail
        _user_data["zfs_fs"] = zfs_fs
        _user_data["ldap_quota"] = _ldap_quota
        _user_data["zfs_quota"] = current_quota
        _user_data["zfs_used"] = current_used

        if current_used >= _ldap_quota and current_used >= current_quota:
            users_over_quota.append(_user_data)
        elif current_used and current_used >= _ldap_quota:
            users_over_ldap_quota.append(_user_data)
        elif current_used and current_used >= current_quota:
            users_over_zfs_quota.append(_user_data)

        if _ldap_quota != current_quota:
            users_ldap_quota_mismatch.append(_user_data)
            zfs_set_cmd = [
                "set", "userquota@%s=%s" % (_username, _ldap_quota), zfs_fs
            ]
            zfs_set_cmds.append(zfs_set_cmd)

    for user in users_over_quota:
        print_data("WARNING: over quota", user)
    print("---------")

    for user in users_over_ldap_quota:
        print_data("WARNING: over LDAP quota", user)
    print("---------")

    for user in users_over_zfs_quota:
        print_data("WARNING: over ZFS quota", user)
    print("---------")

    for user in users_ldap_quota_mismatch:
        print_data("WARNING: quota does not match LDAP", user)
    print("---------")

    for zfs_set_cmd in zfs_set_cmds:
        logger.debug("Executing: zfs %s", " ".join(zfs_set_cmd))
        if not args.noop:
            try:
                zfs(zfs_set_cmd)
            except ErrorReturnCode as e:
                logger.error("FAILED to execute zfs set: %s", e.stderr)
Exemple #36
0
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true', help='Enable debugging mode. Do *NOT* do this on production systems as it allows RCE.')
    parser.add_argument('--host', default='0.0.0.0', type=str, help='Host to listen on.')
    parser.add_argument('--port', default=9002, type=int, help='Port to listen on.')
    parser.add_argument('-s', '--settings', type=str, help='Settings file.')

    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    if args.settings:
        load_config(args.settings)

    required = 'iso-dir', 'hdd-dir', 'vms-dir'
    for row in required:
        if not config(row):
            log.error('The %r value is missing in your configuration! '
                      'Please provide it and run VBoxRPC again.', row)
            exit(1)

        path = config(row)
        if not os.path.isdir(path):
            log.info('Creating directory %r', path)
            os.makedirs(path)

    app = create_app(debug=args.debug)
    app.run(host=args.host, port=args.port)