# NOTE(review): this chunk begins mid-way through update_system_timezone();
# the function head below is reconstructed from the identical definition
# elsewhere in this file -- confirm against the original source.
def update_system_timezone(timezone):
    """Queue a background job that applies `timezone` system-wide via nimbus-manager.

    Best effort: RPC failures are logged, never raised to the caller.
    """

    def set_timezone_job(timezone):  # was named `callable`, shadowing the builtin
        try:
            server = ServerProxy(settings.NIMBUS_MANAGER_URL)
            server.change_timezone(timezone.area)
            time.tzset()  # make the new timezone visible to this process
        except Exception:  # was Py2-only "except Exception, error" syntax
            logger = logging.getLogger(__name__)
            logger.exception("Conexao com nimbus-manager falhou")

    systemprocesses.norm_priority_job(
        "Set system timezone", set_timezone_job, timezone)


def update_ntp_cron_file(timezone):
    """Queue a background job that regenerates the ntpdate cron entry.

    Best effort: RPC failures are logged, never raised to the caller.
    """

    def generate_cron_job(timezone):  # was named `callable`, shadowing the builtin
        try:
            server = ServerProxy(settings.NIMBUS_MANAGER_URL)
            server.generate_ntpdate_file_on_cron(timezone.ntp_server)
        except Exception:  # was Py2-only "except Exception, error" syntax
            logger = logging.getLogger(__name__)
            logger.exception("Conexao com nimbus-manager falhou")

    systemprocesses.norm_priority_job(
        "Generate ntpdate cron file", generate_cron_job, timezone)


signals.connect_on(update_system_timezone, Timezone, post_save)
signals.connect_on(update_ntp_cron_file, Timezone, post_save)
# Dead code carried over from the original source (commented out upstream):
#
# def bacula_config_runs(self):
#     line = u"Run = Level=%s hourly at 00:%02d" % (self.level, self.minute)
#     return [line]
#
# def human_readable(self):
#     return u"De hora em hora aos %02d minutos. Backup %s" % (self.minute,
#                                                              self.level)


def update_schedule_file(schedule):
    """(Re)render the bacula schedule file for the given Schedule."""
    schedule_name = schedule.bacula_name
    target = path.join(settings.NIMBUS_SCHEDULES_DIR, schedule_name)
    render_to_file(target, "schedule",
                   name=schedule_name,
                   runs=schedule.runs.all())


def remove_schedule_file(schedule):
    """Delete the rendered schedule file, tolerating a missing file."""
    target = path.join(settings.NIMBUS_SCHEDULES_DIR, schedule.bacula_name)
    utils.remove_or_leave(target)


def update_schedule(run):
    """Refresh the file of the schedule that owns this Run."""
    update_schedule_file(run.schedule)


signals.connect_on(update_schedule_file, Schedule, post_save)
signals.connect_on(update_schedule, Run, post_save)
signals.connect_on(remove_schedule_file, Schedule, post_delete)
signals.connect_on(update_schedule, Run, post_delete)
# NOTE(review): the statements below are the tail of update_director_file();
# its "def" header and the computation of `filename` lie outside this chunk.
render_to_file(filename, "bacula-dir",
               director_name=config.director_name,
               director_password=config.director_password,
               db_name=settings.DATABASES['bacula']['NAME'],
               db_user=settings.DATABASES['bacula']['USER'],
               db_password=settings.DATABASES['bacula']['PASSWORD'],
               computers_dir=settings.NIMBUS_COMPUTERS_DIR,
               filesets_dir=settings.NIMBUS_FILESETS_DIR,
               jobs_dir=settings.NIMBUS_JOBS_DIR,
               pools_dir=settings.NIMBUS_POOLS_DIR,
               schedules_dir=settings.NIMBUS_SCHEDULES_DIR,
               storages_dir=settings.NIMBUS_STORAGES_DIR)
logger = logging.getLogger(__name__)
logger.info("Arquivo de configuracao do director gerado com sucesso")


def update_console_file(config):
    """Update bconsole file: render it from the current Config instance."""
    target = settings.BCONSOLE_CONF
    render_to_file(target, "bconsole",
                   director_name=config.director_name,
                   director_address=config.director_address,
                   director_password=config.director_password,
                   director_port=9101)
    log = logging.getLogger(__name__)
    log.info("Arquivo de configuracao do bconsole gerado com sucesso")


signals.connect_on(update_director_file, Config, post_save)
signals.connect_on(update_console_file, Config, post_save)
# Pool-file hooks disabled upstream; kept for reference:
#signals.connect_on(update_pool_file, Procedure, post_save)
#signals.connect_on(remove_pool_file, Procedure, post_delete)


def pre_delete_procedure(procedure):
    """Run each job task creator's on_remove() before the procedure is deleted."""
    for task in procedure.job_tasks.all():
        if task.creator:
            task.creator.on_remove(procedure)


def change_job_tasks(sender, instance, action, reverse, model, pk_set, **kwargs):
    """m2m_changed handler: re-render the procedure file when its job tasks change."""
    update_procedure_file(instance)


def update_job_tasks(job_tasks):
    """Re-render every active procedure that has at least one job task."""
    for procedure in Procedure.objects.filter(active=True):
        if procedure.job_tasks.all():
            update_procedure_file(procedure)


m2m_changed.connect(change_job_tasks, sender=Procedure.job_tasks.through)
# signals.connect_on( offsiteconf_check, Procedure, pre_save)
signals.connect_on(update_procedure_file, Procedure, post_save)
signals.connect_on(update_job_tasks, JobTask, post_save)
signals.connect_on(pre_delete_procedure, Procedure, pre_delete)
signals.connect_on(remove_procedure_volumes, Procedure, post_delete)
signals.connect_on(remove_procedure_file, Procedure, post_delete)
def update_system_timezone(timezone):
    """Queue a background job that applies `timezone` system-wide via nimbus-manager.

    Best effort: RPC failures are logged, never raised to the caller.
    """

    def set_timezone_job(timezone):  # was named `callable`, shadowing the builtin
        try:
            server = ServerProxy(settings.NIMBUS_MANAGER_URL)
            server.change_timezone(timezone.area)
            time.tzset()  # make the new timezone visible to this process
        except Exception:  # was Py2-only "except Exception, error" syntax
            logger = logging.getLogger(__name__)
            logger.exception("Conexao com nimbus-manager falhou")

    systemprocesses.norm_priority_job("Set system timezone",
                                      set_timezone_job, timezone)


def update_ntp_cron_file(timezone):
    """Queue a background job that regenerates the ntpdate cron entry.

    Best effort: RPC failures are logged, never raised to the caller.
    """

    def generate_cron_job(timezone):  # was named `callable`, shadowing the builtin
        try:
            server = ServerProxy(settings.NIMBUS_MANAGER_URL)
            server.generate_ntpdate_file_on_cron(timezone.ntp_server)
        except Exception:  # was Py2-only "except Exception, error" syntax
            logger = logging.getLogger(__name__)
            logger.exception("Conexao com nimbus-manager falhou")

    systemprocesses.norm_priority_job("Generate ntpdate cron file",
                                      generate_cron_job, timezone)


signals.connect_on(update_system_timezone, Timezone, post_save)
signals.connect_on(update_ntp_cron_file, Timezone, post_save)
# NOTE(review): this chunk begins inside remove_device_file(); the "def"
# header lies outside this chunk.
if device.storage.active:
    name = device.bacula_name
    filename = path.join(settings.NIMBUS_DEVICES_DIR, name)
    storagefile = path.join(settings.NIMBUS_STORAGES_DIR, name)
    utils.remove_or_leave(filename)
    utils.remove_or_leave(storagefile)


def restart_bacula_storage(model):
    """Validate bacula-sd config, then ask nimbus-manager to restart the storage daemon.

    Best effort: configuration or RPC errors are logged, never raised.
    """
    # Bind the logger before the try so the handlers can always use it
    # (originally it was assigned inside the try block).
    logger = logging.getLogger(__name__)
    try:
        configcheck.check_baculasd(settings.BACULASD_CONF)
        manager = xmlrpclib.ServerProxy(settings.NIMBUS_MANAGER_URL)
        stdout = manager.storage_restart()
        logger.info(stdout)
    except configcheck.ConfigFileError:  # was Py2-only "except X, error" syntax
        logger.error('Bacula-sd error, not reloading')
    except Exception:  # was Py2-only "except Exception, error" syntax
        logger.error("Reload bacula-sd error")


signals.connect_on(update_storage_file, Storage, post_save)
signals.connect_on(update_storage_devices, Storage, post_save)
signals.connect_on(restart_bacula_storage, Storage, post_save)
signals.connect_on(create_default_device, Storage, post_save)
signals.connect_on(update_device_file, Device, post_save)
signals.connect_on(restart_bacula_storage, Device, post_save)
signals.connect_on(remove_device_file, Device, post_delete)
# NOTE(review): __unicode__ below belongs to a model class whose header lies
# outside this chunk; kept at the chunk's original position.
def __unicode__(self):
    return "%s (%s)" % (self.name, self.address)


def update_computer_file(computer):
    """Computer update file: render the bacula client file for an active computer."""
    if computer.active:
        name = computer.bacula_name
        filename = path.join(settings.NIMBUS_COMPUTERS_DIR, name)
        render_to_file(filename, "client",
                       name=name,
                       ip=computer.address,
                       password=computer.password)


def remove_computer_file(computer):
    """Computer remove file: delete the client file of an active computer, if present."""
    if computer.active:
        filename = path.join(settings.NIMBUS_COMPUTERS_DIR,
                             computer.bacula_name)
        utils.remove_or_leave(filename)


def generate_keys(computer):
    """Ensure the computer has crypto material, creating it on first save."""
    try:
        computer.crypto_info
    except CryptoInfo.DoesNotExist:  # was Py2-only "except X, error" syntax
        key, cert, pem = keymanager.generate_all_keys(settings.NIMBUS_SSLCONFIG)
        info = CryptoInfo.objects.create(key=key, certificate=cert, pem=pem)
        computer.crypto_info = info


signals.connect_on(generate_keys, Computer, pre_save)
signals.connect_on(update_computer_file, Computer, post_save)
signals.connect_on(remove_computer_file, Computer, post_delete)
# NOTE(review): this chunk begins mid-way through a render_to_file() call in
# update_director_file(); the call head below is reconstructed from the
# identical code elsewhere in this file -- confirm against the original.
render_to_file(filename, "bacula-dir",
               director_name=config.director_name,
               director_password=config.director_password,
               db_name=settings.DATABASES['bacula']['NAME'],
               db_user=settings.DATABASES['bacula']['USER'],
               db_password=settings.DATABASES['bacula']['PASSWORD'],
               computers_dir=settings.NIMBUS_COMPUTERS_DIR,
               filesets_dir=settings.NIMBUS_FILESETS_DIR,
               jobs_dir=settings.NIMBUS_JOBS_DIR,
               pools_dir=settings.NIMBUS_POOLS_DIR,
               schedules_dir=settings.NIMBUS_SCHEDULES_DIR,
               storages_dir=settings.NIMBUS_STORAGES_DIR)
logger = logging.getLogger(__name__)
logger.info("Arquivo de configuracao do director gerado com sucesso")


def update_console_file(config):
    """Update bconsole file: render it from the current Config instance."""
    target = settings.BCONSOLE_CONF
    render_to_file(target, "bconsole",
                   director_name=config.director_name,
                   director_address=config.director_address,
                   director_password=config.director_password,
                   director_port=9101)
    log = logging.getLogger(__name__)
    log.info("Arquivo de configuracao do bconsole gerado com sucesso")


signals.connect_on(update_director_file, Config, post_save)
signals.connect_on(update_console_file, Config, post_save)
# NOTE(review): tail of update_fileset_file(); the "def" header and the local
# variables (filename, name, files, includes, excludes, verbose_name) lie
# outside this chunk.
render_to_file(filename, "fileset",
               name=name,
               files=files,
               includes=includes,
               excludes=excludes,
               verbose_name=verbose_name)


def update_filters(wildcard):
    """Re-render the fileset file that owns the changed wildcard."""
    update_fileset_file(wildcard.fileset)


def remove_fileset_file(fileset):
    """remove FileSet file, tolerating absence."""
    target = path.join(settings.NIMBUS_FILESETS_DIR, fileset.bacula_name)
    utils.remove_or_leave(target)


def update_filepath(filepath):
    """Re-render the fileset file that owns the changed file path."""
    update_fileset_file(filepath.fileset)


signals.connect_on(update_fileset_file, FileSet, post_save)
signals.connect_on(update_filepath, FilePath, post_save)
signals.connect_on(update_filters, Wildcard, post_save)
signals.connect_on(remove_fileset_file, FileSet, post_delete)
signals.connect_on(update_filepath, FilePath, post_delete)
# NOTE(review): stray statement from a function cut off above this chunk.
task_objects[0].save()


def is_active():
    """Return whether the offsite feature is currently enabled."""
    offsite = Offsite.get_instance()
    return offsite.active


class OffsiteGraphicsData(TheRealBaseModel):
    """Point-in-time sample of offsite storage usage, for graphing."""

    total = models.BigIntegerField(default=0, editable=False)  # capacity (bytes, presumably -- confirm)
    used = models.BigIntegerField(default=0, editable=False)   # amount consumed
    timestamp = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        # Guard against total == 0 (the field default), which previously
        # raised ZeroDivisionError, and use float math so the percentage
        # keeps its decimals under Python 2 integer division.
        percent = (self.used * 100.0 / self.total) if self.total else 0.0
        return "%s - %s de %s (%.2f%%)" % (
            self.timestamp.strftime("%H:%M:%S %d/%m/%Y"),
            self.used, self.total, percent)


def update_pool_size(procedure):
    """pre_save hook: size the procedure's pool according to the offsite config."""
    offsite_conf = Offsite.get_instance()
    if offsite_conf.active and offsite_conf.host != offsite_conf.AMZ_S3_HOST:
        # TODO: change model field to integer
        procedure.pool_size = settings.DEFAULT_PROCEDURE_POOL_SIZE
    else:
        procedure.pool_size = 0


signals.connect_on(update_offsite, Offsite, post_save)
signals.connect_on(update_pool_size, Procedure, pre_save)
# NOTE(review): this chunk begins inside update_computer_file(); its "def"
# header (and the `if computer.active:` guard seen in the duplicate copy of
# this code elsewhere in the file) lies outside this chunk.
name = computer.bacula_name
filename = path.join(settings.NIMBUS_COMPUTERS_DIR, name)
render_to_file(filename, "client",
               name=name,
               ip=computer.address,
               password=computer.password)


def remove_computer_file(computer):
    """Computer remove file: delete the client file of an active computer, if present."""
    if computer.active:
        filename = path.join(settings.NIMBUS_COMPUTERS_DIR,
                             computer.bacula_name)
        utils.remove_or_leave(filename)


def generate_keys(computer):
    """Ensure the computer has crypto material, creating it on first save."""
    try:
        computer.crypto_info
    except CryptoInfo.DoesNotExist:  # was Py2-only "except X, error" syntax
        key, cert, pem = keymanager.generate_all_keys(
            settings.NIMBUS_SSLCONFIG)
        info = CryptoInfo.objects.create(key=key, certificate=cert, pem=pem)
        computer.crypto_info = info


signals.connect_on(generate_keys, Computer, pre_save)
signals.connect_on(update_computer_file, Computer, post_save)
signals.connect_on(remove_computer_file, Computer, post_delete)
# NOTE(review): tail of update_storage_address(); its "def" header lies
# outside this chunk.
storage.address = interface.address
storage.save(system_permission=True)
logger = logging.getLogger(__name__)
logger.info("Atualizando ip do storage")


def update_nimbus_client_address(interface):
    """Keep the default Nimbus client's address in sync with the interface."""
    from nimbus.computers.models import Computer  # see note at the top-level imports
    client = Computer.objects.get(id=1)  # default nimbus client record
    client.address = interface.address
    client.save(system_permission=True)
    logging.getLogger(__name__).info("Atualizando ip do client nimbus")


def get_nimbus_address():
    """Return the configured director address, falling back to the raw interface."""
    from nimbus.config.models import Config  # see note at the top-level imports
    config = Config.get_instance()
    if config.director_address:
        return config.director_address
    return get_raw_network_interface_address()


def get_raw_network_interface_address():
    """Address of the first detected network interface."""
    return networkutils.get_interfaces()[0].addr


signals.connect_on(update_networks_file, NetworkInterface, post_save)
signals.connect_on(update_director_address, NetworkInterface, post_save)
signals.connect_on(update_storage_address, NetworkInterface, post_save)
signals.connect_on(update_nimbus_client_address, NetworkInterface, post_save)
# NOTE(review): this chunk begins inside remove_device_file(), cutting a
# path.join() call in half; the assignment below is reconstructed from the
# identical code elsewhere in this file -- confirm against the original.
storagefile = path.join(settings.NIMBUS_STORAGES_DIR, name)
utils.remove_or_leave(filename)
utils.remove_or_leave(storagefile)


def restart_bacula_storage(model):
    """Validate bacula-sd config, then ask nimbus-manager to restart the storage daemon.

    Best effort: configuration or RPC errors are logged, never raised.
    """
    # Bind the logger before the try so the handlers can always use it
    # (originally it was assigned inside the try block).
    logger = logging.getLogger(__name__)
    try:
        configcheck.check_baculasd(settings.BACULASD_CONF)
        manager = xmlrpclib.ServerProxy(settings.NIMBUS_MANAGER_URL)
        stdout = manager.storage_restart()
        logger.info(stdout)
    except configcheck.ConfigFileError:  # was Py2-only "except X, error" syntax
        logger.error('Bacula-sd error, not reloading')
    except Exception:  # was Py2-only "except Exception, error" syntax
        logger.error("Reload bacula-sd error")


signals.connect_on(update_storage_file, Storage, post_save)
signals.connect_on(update_storage_devices, Storage, post_save)
signals.connect_on(restart_bacula_storage, Storage, post_save)
signals.connect_on(create_default_device, Storage, post_save)
signals.connect_on(update_device_file, Device, post_save)
signals.connect_on(restart_bacula_storage, Device, post_save)
signals.connect_on(remove_device_file, Device, post_delete)
# Dead code carried over from the original source (commented out upstream;
# the start of this comment block is cut off by the chunk boundary):
#     return [line]
#
# def human_readable(self):
#     return u"De hora em hora aos %02d minutos. Backup %s" % (self.minute,
#                                                              self.level)


def update_schedule_file(schedule):
    """Render the bacula schedule file for the given Schedule instance."""
    schedule_name = schedule.bacula_name
    destination = path.join(settings.NIMBUS_SCHEDULES_DIR, schedule_name)
    render_to_file(destination, "schedule",
                   name=schedule_name,
                   runs=schedule.runs.all())


def remove_schedule_file(schedule):
    """Remove the rendered schedule file; missing files are ignored."""
    destination = path.join(settings.NIMBUS_SCHEDULES_DIR,
                            schedule.bacula_name)
    utils.remove_or_leave(destination)


def update_schedule(run):
    """Re-render the file of the schedule this Run belongs to."""
    update_schedule_file(run.schedule)


signals.connect_on(update_schedule_file, Schedule, post_save)
signals.connect_on(update_schedule, Run, post_save)
signals.connect_on(remove_schedule_file, Schedule, post_delete)
signals.connect_on(update_schedule, Run, post_delete)
# NOTE(review): the two members below belong to a model class (FilePath,
# presumably) whose header lies outside this chunk; the "def __unicode__"
# line is reconstructed because the chunk starts at the bare "return"
# statement -- confirm against the original source.
def __unicode__(self):
    return u"%s - %s" % (self.fileset.name, self.path)


class Meta:
    verbose_name = u"Arquivo"


def update_fileset_file(fileset):
    """FileSet update filesets to a procedure instance"""
    fileset_name = fileset.bacula_name
    destination = path.join(settings.NIMBUS_FILESETS_DIR, fileset_name)
    file_paths = [entry.path for entry in fileset.files.all()]
    render_to_file(destination, "fileset",
                   name=fileset_name,
                   files=file_paths)


def remove_fileset_file(fileset):
    """remove FileSet file; missing files are ignored."""
    destination = path.join(settings.NIMBUS_FILESETS_DIR,
                            fileset.bacula_name)
    utils.remove_or_leave(destination)


def update_filepath(filepath):
    """Re-render the fileset file that owns this FilePath."""
    update_fileset_file(filepath.fileset)


signals.connect_on(update_fileset_file, FileSet, post_save)
signals.connect_on(update_filepath, FilePath, post_save)
signals.connect_on(remove_fileset_file, FileSet, post_delete)
signals.connect_on(update_filepath, FilePath, post_delete)
# NOTE(review): tail of update_offsite() (see the connect_on below); its
# "def" header lies outside this chunk.
if task_objects[0].active != offsite.active:
    task_objects[0].active = offsite.active
    task_objects[0].save()


def is_active():
    """Return whether the offsite feature is currently enabled."""
    offsite = Offsite.get_instance()
    return offsite.active


class OffsiteGraphicsData(TheRealBaseModel):
    """Point-in-time sample of offsite storage usage, for graphing."""

    total = models.BigIntegerField(default=0, editable=False)  # capacity (bytes, presumably -- confirm)
    used = models.BigIntegerField(default=0, editable=False)   # amount consumed
    timestamp = models.DateTimeField(auto_now_add=True)

    def __unicode__(self):
        # Guard against total == 0 (the field default), which previously
        # raised ZeroDivisionError, and use float math so the percentage
        # keeps its decimals under Python 2 integer division.
        percent = (self.used * 100.0 / self.total) if self.total else 0.0
        return "%s - %s de %s (%.2f%%)" % (
            self.timestamp.strftime("%H:%M:%S %d/%m/%Y"),
            self.used, self.total, percent)


def update_pool_size(procedure):
    """pre_save hook: size the procedure's pool according to the offsite config."""
    offsite_conf = Offsite.get_instance()
    if offsite_conf.active and offsite_conf.host != offsite_conf.AMZ_S3_HOST:
        # TODO: change model field to integer
        procedure.pool_size = settings.DEFAULT_PROCEDURE_POOL_SIZE
    else:
        procedure.pool_size = 0


signals.connect_on(update_offsite, Offsite, post_save)
signals.connect_on(update_pool_size, Procedure, pre_save)
def update_nimbus_client_address(interface):
    """Keep the default Nimbus client's address in sync with the interface IP."""
    from nimbus.computers.models import Computer  # see note at the top-level imports
    client = Computer.objects.get(id=1)  # default nimbus client record
    client.address = interface.address
    client.save(system_permission=True)
    log = logging.getLogger(__name__)
    log.info("Atualizando ip do client nimbus")


def get_nimbus_address():
    """Return the configured director address, falling back to the raw interface."""
    from nimbus.config.models import Config  # see note at the top-level imports
    config = Config.get_instance()
    if config.director_address:
        return config.director_address
    return get_raw_network_interface_address()


def get_raw_network_interface_address():
    """Address of the first detected network interface."""
    return networkutils.get_interfaces()[0].addr


signals.connect_on(update_networks_file, NetworkInterface, post_save)
signals.connect_on(update_director_address, NetworkInterface, post_save)
signals.connect_on(update_storage_address, NetworkInterface, post_save)
signals.connect_on(update_nimbus_client_address, NetworkInterface, post_save)
# NOTE(review): this chunk begins inside a try-block of a volume-removal
# function; the "try:" below is reconstructed so the visible "except" clause
# is syntactically attached -- confirm the enclosing function ("def" header
# and any preceding statements) against the original source.
try:
    os.remove(volume_abs_path)
    reload_manager = ReloadManager()
    reload_manager.force_reload()
except BConsoleInitError:  # was Py2-only "except X, error" syntax
    logger = logging.getLogger(__name__)
    logger.exception("Erro na comunicação com o bacula")


def update_pool_file(procedure):
    """Pool update pool bacula file"""
    name = procedure.pool_bacula_name()
    filename = path.join(settings.NIMBUS_POOLS_DIR, name)
    render_to_file(filename, "pool",
                   name=name,
                   max_vol_bytes=procedure.pool_size,
                   days=procedure.pool_retention_time)


def remove_pool_file(procedure):
    """pool remove file; missing files are ignored."""
    name = procedure.pool_bacula_name()
    filename = path.join(settings.NIMBUS_POOLS_DIR, name)
    utils.remove_or_leave(filename)


# Pool-file hooks disabled upstream; kept for reference:
#signals.connect_on(update_pool_file, Procedure, post_save)
#signals.connect_on(remove_pool_file, Procedure, post_delete)
signals.connect_on(update_procedure_file, Procedure, post_save)
signals.connect_on(remove_procedure_volumes, Procedure, post_delete)
signals.connect_on(remove_procedure_file, Procedure, post_delete)
# NOTE(review): tail of remove_pool_file(); its "def" header and the
# computation of `name` lie outside this chunk.
filename = path.join(settings.NIMBUS_POOLS_DIR, name)
utils.remove_or_leave(filename)


# Pool-file hooks disabled upstream; kept for reference:
#signals.connect_on(update_pool_file, Procedure, post_save)
#signals.connect_on(remove_pool_file, Procedure, post_delete)


def pre_delete_procedure(procedure):
    """Invoke each job task creator's on_remove() before the procedure goes away."""
    for job_task in procedure.job_tasks.all():
        if job_task.creator:
            job_task.creator.on_remove(procedure)


def change_job_tasks(sender, instance, action, reverse, model, pk_set, **kwargs):
    """m2m_changed handler: re-render the procedure file when its job tasks change."""
    update_procedure_file(instance)


def update_job_tasks(job_tasks):
    """Re-render every active procedure that still has job tasks attached."""
    active_procedures = Procedure.objects.filter(active=True)
    for procedure in active_procedures:
        if procedure.job_tasks.all():
            update_procedure_file(procedure)


m2m_changed.connect(change_job_tasks, sender=Procedure.job_tasks.through)
# signals.connect_on( offsiteconf_check, Procedure, pre_save)
signals.connect_on(update_procedure_file, Procedure, post_save)
signals.connect_on(update_job_tasks, JobTask, post_save)
signals.connect_on(pre_delete_procedure, Procedure, pre_delete)
signals.connect_on(remove_procedure_volumes, Procedure, post_delete)
signals.connect_on(remove_procedure_file, Procedure, post_delete)