def __init__(self):
    dckr = docker.Client()
    cfg = read_config()
    backup_location = os.path.expanduser(cfg['backup_location'])
    user_home_img = os.path.expanduser(cfg['user_home_image'])
    mnt_location = os.path.expanduser(cfg['mnt_location'])
    cloud_cfg = cfg['cloud_host']
    backup_bucket = cloud_cfg['backup_bucket']
    make_sure_path_exists(backup_location)

    CloudHelper.configure(has_s3=cloud_cfg['s3'],
                          has_dynamodb=cloud_cfg['dynamodb'],
                          has_cloudwatch=cloud_cfg['cloudwatch'],
                          region=cloud_cfg['region'],
                          install_id=cloud_cfg['install_id'])
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'], cfg['disk_limit'],
                            [os.path.join(mnt_location, '${DISK_ID}')],
                            mnt_location, backup_location, user_home_img,
                            cfg['numlocalmax'], cfg['numdisksmax'],
                            backup_bucket=backup_bucket)

    # back up user files every hour
    # check: the configured expiry time must be at least twice this interval
    self.run_interval = int(cfg['delete_stopped_timeout']) / 2
    if self.run_interval < 3 * 60:
        self.run_interval = 3 * 60
    self.delete_stopped_timeout = int(cfg['delete_stopped_timeout'])
    self.log_info("Backup interval: " + str(self.run_interval / 60) + " minutes")
    self.log_info("Stopped containers will be deleted after " +
                  str(self.delete_stopped_timeout / 60) + " minutes")
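# A minimal sketch (not part of the original daemon) illustrating the interval rule used
# above: the backup run interval is half of 'delete_stopped_timeout', clamped to a
# 3-minute floor, which is why the configured expiry must be at least twice the interval.
# The helper name and the sample values are hypothetical.
def _example_backup_interval(delete_stopped_timeout):
    run_interval = int(delete_stopped_timeout) / 2   # Python 2 integer division, as in __init__
    if run_interval < 3 * 60:
        run_interval = 3 * 60
    return run_interval

# e.g. a 1-hour expiry yields a 30-minute backup interval,
# while a 5-minute expiry is clamped up to the 3-minute floor.
assert _example_backup_interval(3600) == 1800
assert _example_backup_interval(300) == 180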
def __init__(self):
    dckr = docker.Client()
    cfg = read_config()
    cloud_cfg = cfg['cloud_host']
    user_activation_cfg = cfg['user_activation']

    LoggerMixin.setup_logger(level=cfg['root_log_level'])
    LoggerMixin.DEFAULT_LEVEL = cfg['jbox_log_level']

    db.configure_db(cfg)

    CloudHost.configure(has_s3=cloud_cfg['s3'],
                        has_dynamodb=cloud_cfg['dynamodb'],
                        has_cloudwatch=cloud_cfg['cloudwatch'],
                        has_autoscale=cloud_cfg['autoscale'],
                        has_route53=cloud_cfg['route53'],
                        has_ebs=cloud_cfg['ebs'],
                        has_ses=cloud_cfg['ses'],
                        scale_up_at_load=cloud_cfg['scale_up_at_load'],
                        scale_up_policy=cloud_cfg['scale_up_policy'],
                        autoscale_group=cloud_cfg['autoscale_group'],
                        route53_domain=cloud_cfg['route53_domain'],
                        region=cloud_cfg['region'],
                        install_id=cloud_cfg['install_id'])
    VolMgr.configure(dckr, cfg)
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'],
                            cfg['numlocalmax'], cfg['async_job_port'],
                            async_mode=JBoxAsyncJob.MODE_SUB)
    self.log_debug("Backup daemon listening on port: " + str(cfg['async_job_port']))
    self.queue = JBoxContainer.ASYNC_JOB

    JBoxd.MAX_ACTIVATIONS_PER_SEC = user_activation_cfg['max_activations_per_sec']
    JBoxd.MAX_AUTO_ACTIVATIONS_PER_RUN = user_activation_cfg['max_activations_per_run']
    JBoxd.ACTIVATION_SUBJECT = user_activation_cfg['mail_subject']
    JBoxd.ACTIVATION_BODY = user_activation_cfg['mail_body']
    JBoxd.ACTIVATION_SENDER = user_activation_cfg['sender']
def __init__(self):
    cfg = JBox.cfg = read_config()
    dckr = docker.Client()
    cloud_cfg = cfg['cloud_host']

    JBoxHandler.configure(cfg)
    JBoxDB.configure(cfg)
    if 'jbox_users_v2' in cloud_cfg:
        JBoxUserV2.NAME = cloud_cfg['jbox_users_v2']
    if 'jbox_invites' in cloud_cfg:
        JBoxInvite.NAME = cloud_cfg['jbox_invites']
    if 'jbox_accounting_v2' in cloud_cfg:
        JBoxAccountingV2.NAME = cloud_cfg['jbox_accounting_v2']

    CloudHelper.configure(has_s3=cloud_cfg['s3'],
                          has_dynamodb=cloud_cfg['dynamodb'],
                          has_cloudwatch=cloud_cfg['cloudwatch'],
                          has_autoscale=cloud_cfg['autoscale'],
                          has_route53=cloud_cfg['route53'],
                          scale_up_at_load=cloud_cfg['scale_up_at_load'],
                          scale_up_policy=cloud_cfg['scale_up_policy'],
                          autoscale_group=cloud_cfg['autoscale_group'],
                          route53_domain=cloud_cfg['route53_domain'],
                          region=cloud_cfg['region'],
                          install_id=cloud_cfg['install_id'])

    backup_location = os.path.expanduser(cfg['backup_location'])
    user_home_img = os.path.expanduser(cfg['user_home_image'])
    mnt_location = os.path.expanduser(cfg['mnt_location'])
    backup_bucket = cloud_cfg['backup_bucket']
    make_sure_path_exists(backup_location)
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'], cfg['disk_limit'],
                            [os.path.join(mnt_location, '${DISK_ID}')],
                            mnt_location, backup_location, user_home_img,
                            cfg['numlocalmax'], cfg['numdisksmax'],
                            backup_bucket=backup_bucket)

    self.application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/hostlaunchipnb/", AuthHandler),
        (r"/hostadmin/", AdminHandler),
        (r"/ping/", PingHandler),
        (r"/cors/", CorsHandler)
    ])
    cookie_secret = ''.join(random.choice(string.ascii_uppercase + string.digits)
                            for x in xrange(32))
    self.application.settings["cookie_secret"] = cookie_secret
    self.application.settings["google_oauth"] = cfg["google_oauth"]
    self.application.listen(cfg["port"])

    self.ioloop = tornado.ioloop.IOLoop.instance()

    # run container maintenance every 5 minutes
    run_interval = 5 * 60 * 1000
    self.log_info("Container maintenance every " + str(run_interval / (60 * 1000)) + " minutes")
    self.ct = tornado.ioloop.PeriodicCallback(JBox.do_housekeeping, run_interval, self.ioloop)
def __init__(self):
    dckr = docker.Client()
    cfg = JBox.cfg = read_config()
    cloud_cfg = cfg['cloud_host']

    LoggerMixin.setup_logger(level=cfg['root_log_level'])
    LoggerMixin.DEFAULT_LEVEL = cfg['jbox_log_level']

    JBoxHandler.configure(cfg)
    db.configure_db(cfg)

    CloudHost.configure(has_s3=cloud_cfg['s3'],
                        has_dynamodb=cloud_cfg['dynamodb'],
                        has_cloudwatch=cloud_cfg['cloudwatch'],
                        has_autoscale=cloud_cfg['autoscale'],
                        has_route53=cloud_cfg['route53'],
                        has_ebs=cloud_cfg['ebs'],
                        has_ses=cloud_cfg['ses'],
                        scale_up_at_load=cloud_cfg['scale_up_at_load'],
                        scale_up_policy=cloud_cfg['scale_up_policy'],
                        autoscale_group=cloud_cfg['autoscale_group'],
                        route53_domain=cloud_cfg['route53_domain'],
                        region=cloud_cfg['region'],
                        install_id=cloud_cfg['install_id'])
    VolMgr.configure(dckr, cfg)
    JBoxAsyncJob.configure(cfg)
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'],
                            cfg['numlocalmax'], cfg['async_job_ports'])

    self.application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/hostlaunchipnb/", AuthHandler),
        (r"/hostadmin/", AdminHandler),
        (r"/ping/", PingHandler),
        (r"/cors/", CorsHandler)
    ])
    cookie_secret = ''.join(random.choice(string.ascii_uppercase + string.digits)
                            for x in xrange(32))
    self.application.settings["cookie_secret"] = cookie_secret
    self.application.settings["google_oauth"] = cfg["google_oauth"]
    self.application.listen(cfg["port"])

    self.ioloop = tornado.ioloop.IOLoop.instance()

    # run container maintenance every 5 minutes
    run_interval = 5 * 60 * 1000
    self.log_info("Container maintenance every " + str(run_interval / (60 * 1000)) + " minutes")
    self.ct = tornado.ioloop.PeriodicCallback(JBox.do_housekeeping, run_interval, self.ioloop)
def __init__(self):
    dckr = docker.Client()
    cfg = JBox.cfg = read_config()
    cloud_cfg = cfg['cloud_host']

    LoggerMixin.setup_logger(level=cfg['root_log_level'])
    LoggerMixin.DEFAULT_LEVEL = cfg['jbox_log_level']

    JBoxHandler.configure(cfg)
    db.configure_db(cfg)

    CloudHost.configure(has_s3=cloud_cfg['s3'],
                        has_dynamodb=cloud_cfg['dynamodb'],
                        has_cloudwatch=cloud_cfg['cloudwatch'],
                        has_autoscale=cloud_cfg['autoscale'],
                        has_route53=cloud_cfg['route53'],
                        has_ebs=cloud_cfg['ebs'],
                        has_ses=cloud_cfg['ses'],
                        scale_up_at_load=cloud_cfg['scale_up_at_load'],
                        scale_up_policy=cloud_cfg['scale_up_policy'],
                        autoscale_group=cloud_cfg['autoscale_group'],
                        route53_domain=cloud_cfg['route53_domain'],
                        region=cloud_cfg['region'],
                        install_id=cloud_cfg['install_id'])
    VolMgr.configure(dckr, cfg)
    JBoxAsyncJob.configure(cfg)
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'],
                            cfg['numlocalmax'], cfg['async_job_ports'])

    self.application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/hostlaunchipnb/", AuthHandler),
        (r"/hostadmin/", AdminHandler),
        (r"/ping/", PingHandler),
        (r"/cors/", CorsHandler),
        (r"/hw/", HomeworkHandler)
    ])
    cookie_secret = ''.join(random.choice(string.ascii_uppercase + string.digits)
                            for x in xrange(32))
    self.application.settings["cookie_secret"] = cookie_secret
    self.application.settings["google_oauth"] = cfg["google_oauth"]
    self.application.listen(cfg["port"])

    self.ioloop = tornado.ioloop.IOLoop.instance()

    # run container maintenance every 5 minutes
    run_interval = 5 * 60 * 1000
    self.log_info("Container maintenance every " + str(run_interval / (60 * 1000)) + " minutes")
    self.ct = tornado.ioloop.PeriodicCallback(JBox.do_housekeeping, run_interval, self.ioloop)
def __init__(self):
    cfg = JBox.cfg = read_config()
    dckr = docker.Client()
    cloud_cfg = cfg['cloud_host']

    JBoxHandler.configure(cfg)
    JBoxDB.configure(cfg)
    if 'jbox_users_v2' in cloud_cfg:
        JBoxUserV2.NAME = cloud_cfg['jbox_users_v2']
    if 'jbox_invites' in cloud_cfg:
        JBoxInvite.NAME = cloud_cfg['jbox_invites']
    if 'jbox_accounting_v2' in cloud_cfg:
        JBoxAccountingV2.NAME = cloud_cfg['jbox_accounting_v2']

    CloudHelper.configure(has_s3=cloud_cfg['s3'],
                          has_dynamodb=cloud_cfg['dynamodb'],
                          has_cloudwatch=cloud_cfg['cloudwatch'],
                          region=cloud_cfg['region'],
                          install_id=cloud_cfg['install_id'])

    backup_location = os.path.expanduser(cfg['backup_location'])
    user_home_img = os.path.expanduser(cfg['user_home_image'])
    mnt_location = os.path.expanduser(cfg['mnt_location'])
    backup_bucket = cloud_cfg['backup_bucket']
    make_sure_path_exists(backup_location)
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'], cfg['disk_limit'],
                            [os.path.join(mnt_location, '${DISK_ID}')],
                            mnt_location, backup_location, user_home_img,
                            cfg['numlocalmax'], cfg['numdisksmax'],
                            backup_bucket=backup_bucket)

    self.application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/hostlaunchipnb/", AuthHandler),
        (r"/hostadmin/", AdminHandler),
        (r"/ping/", PingHandler),
        (r"/cors/", CorsHandler)
    ])
    cookie_secret = ''.join(random.choice(string.ascii_uppercase + string.digits)
                            for x in xrange(32))
    self.application.settings["cookie_secret"] = cookie_secret
    self.application.settings["google_oauth"] = cfg["google_oauth"]
    self.application.listen(cfg["port"])

    self.ioloop = tornado.ioloop.IOLoop.instance()

    # run container maintenance every 5 minutes
    run_interval = 5 * 60 * 1000
    self.log_info("Container maintenance every " + str(run_interval / (60 * 1000)) + " minutes")
    self.ct = tornado.ioloop.PeriodicCallback(JBox.do_housekeeping, run_interval, self.ioloop)
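# A minimal sketch (assumed, not taken from the original source) of how the web-server
# variants above would typically be started: kick off the housekeeping PeriodicCallback,
# then block on the Tornado IOLoop. The method name 'run' is hypothetical; the calls used
# (PeriodicCallback.start, IOLoop.start) are standard Tornado APIs.
def run(self):
    self.ct.start()        # begin the 5-minute container-maintenance callback
    self.ioloop.start()    # serve requests on cfg["port"] until the IOLoop is stopped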
def __init__(self):
    dckr = docker.Client()
    cfg = read_config()
    backup_location = os.path.expanduser(cfg['backup_location'])
    user_home_img = os.path.expanduser(cfg['user_home_image'])
    mnt_location = os.path.expanduser(cfg['mnt_location'])
    cloud_cfg = cfg['cloud_host']
    backup_bucket = cloud_cfg['backup_bucket']
    make_sure_path_exists(backup_location)

    CloudHelper.configure(has_s3=cloud_cfg['s3'],
                          has_dynamodb=cloud_cfg['dynamodb'],
                          has_cloudwatch=cloud_cfg['cloudwatch'],
                          has_autoscale=cloud_cfg['autoscale'],
                          has_route53=cloud_cfg['route53'],
                          scale_up_at_load=cloud_cfg['scale_up_at_load'],
                          scale_up_policy=cloud_cfg['scale_up_policy'],
                          autoscale_group=cloud_cfg['autoscale_group'],
                          route53_domain=cloud_cfg['route53_domain'],
                          region=cloud_cfg['region'],
                          install_id=cloud_cfg['install_id'])
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'], cfg['disk_limit'],
                            [os.path.join(mnt_location, '${DISK_ID}')],
                            mnt_location, backup_location, user_home_img,
                            cfg['numlocalmax'], cfg['numdisksmax'],
                            backup_bucket=backup_bucket)

    # back up user files every hour
    # check: the configured expiry time must be at least twice this interval
    self.run_interval = int(cfg['delete_stopped_timeout']) / 2
    if self.run_interval < 3 * 60:
        self.run_interval = 3 * 60
    self.delete_stopped_timeout = int(cfg['delete_stopped_timeout'])
    self.log_info("Backup interval: " + str(self.run_interval / 60) + " minutes")
    self.log_info("Stopped containers will be deleted after " +
                  str(self.delete_stopped_timeout / 60) + " minutes")
def __init__(self):
    dckr = docker.Client()
    cfg = read_config()
    cloud_cfg = cfg['cloud_host']
    user_activation_cfg = cfg['user_activation']

    LoggerMixin.setup_logger(level=cfg['root_log_level'])
    LoggerMixin.DEFAULT_LEVEL = cfg['jbox_log_level']

    db.configure_db(cfg)

    CloudHost.configure(has_s3=cloud_cfg['s3'],
                        has_dynamodb=cloud_cfg['dynamodb'],
                        has_cloudwatch=cloud_cfg['cloudwatch'],
                        has_autoscale=cloud_cfg['autoscale'],
                        has_route53=cloud_cfg['route53'],
                        has_ebs=cloud_cfg['ebs'],
                        has_ses=cloud_cfg['ses'],
                        scale_up_at_load=cloud_cfg['scale_up_at_load'],
                        scale_up_policy=cloud_cfg['scale_up_policy'],
                        autoscale_group=cloud_cfg['autoscale_group'],
                        route53_domain=cloud_cfg['route53_domain'],
                        region=cloud_cfg['region'],
                        install_id=cloud_cfg['install_id'])
    VolMgr.configure(dckr, cfg)
    JBoxAsyncJob.configure(cfg)
    JBoxContainer.configure(dckr, cfg['docker_image'],
                            cfg['mem_limit'], cfg['cpu_limit'],
                            cfg['numlocalmax'], cfg['async_job_ports'],
                            async_mode=JBoxAsyncJob.MODE_SUB)
    self.log_debug("Backup daemon listening on ports: %s", repr(cfg['async_job_ports']))

    JBoxd.QUEUE = JBoxContainer.ASYNC_JOB
    JBoxd.MAX_ACTIVATIONS_PER_SEC = user_activation_cfg['max_activations_per_sec']
    JBoxd.MAX_AUTO_ACTIVATIONS_PER_RUN = user_activation_cfg['max_activations_per_run']
    JBoxd.ACTIVATION_SUBJECT = user_activation_cfg['mail_subject']
    JBoxd.ACTIVATION_BODY = user_activation_cfg['mail_body']
    JBoxd.ACTIVATION_SENDER = user_activation_cfg['sender']
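# A hypothetical configuration fragment (illustrative values only, not from the original
# repository) showing the keys the daemon __init__ variants above read; real deployments
# supply their own values via read_config().
example_cfg = {
    'root_log_level': 'INFO',
    'jbox_log_level': 'DEBUG',
    'docker_image': 'juliabox/juliabox',       # assumed image name
    'mem_limit': 1000000000,
    'cpu_limit': 1024,
    'numlocalmax': 30,
    'async_job_ports': [51001, 51002],
    'user_activation': {
        'max_activations_per_sec': 10,
        'max_activations_per_run': 50,
        'mail_subject': 'Your account is now active',
        'mail_body': '...',
        'sender': 'admin@example.com'
    },
    'cloud_host': {
        's3': True, 'dynamodb': True, 'cloudwatch': True,
        'autoscale': True, 'route53': True, 'ebs': True, 'ses': True,
        'scale_up_at_load': 70,
        'scale_up_policy': 'addinstance',
        'autoscale_group': 'juliabox',
        'route53_domain': 'example.com',
        'region': 'us-east-1',
        'install_id': 'JuliaBox'
    }
}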