def create_db(delete=False):
    """Ensure the 'des' database and its Jobs table exist.

    Reads MySQL credentials from config/mysqlconfig.yaml, connects at the
    server level (without pre-selecting a database), restores the 'des'
    database from backup if it is missing, and creates the Jobs table.

    Args:
        delete: when True, drop any existing Jobs table before recreating it.
    """
    with open('config/mysqlconfig.yaml', 'r') as cfile:
        # safe_load: the plain yaml.load without a Loader is deprecated and
        # can execute arbitrary YAML tags
        conf = yaml.safe_load(cfile)['mysql']
    # drop any configured default db so we connect server-wide first
    conf.pop('db', None)
    con = mydb.connect(**conf)
    try:
        try:
            con.select_db('des')
        except Exception:
            # database is missing -> restore it from backup, then retry
            # (narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit)
            backup.restore()
            con.commit()
            con.select_db('des')
        cur = con.cursor()
        if delete:
            cur.execute("DROP TABLE IF EXISTS Jobs")
        cur.execute("""
            CREATE TABLE IF NOT EXISTS Jobs(
                user varchar(50),
                job varchar(50),
                name text,
                status text,
                time datetime,
                type text,
                query mediumtext,
                files mediumtext,
                sizes text,
                runtime int
            )""")
        con.commit()
    finally:
        # previously the connection leaked if any statement above raised
        con.close()
def prepare():
    """Prepare the host for redis with the configured persistence mode.

    Copies the persistence-specific redis config into place, patches
    maxmemory settings, installs root's crontab, and — unless a valid
    mount already exists or persistence is "no" — creates the backup
    bucket, attaches a device (preferring the snapshot from metadata,
    then the latest known snapshot, then a fresh device) and restores
    the most recent RDB dump.

    Relies on module-level globals: path, persistence, maxmemory, policy,
    mount, cluster, key, access, snapshot, rdb.
    """
    log('prepare the environment', 'info')
    # from this point we are sure we don't have to be careful
    # with local files/devices/disks/etc
    dst = "/etc/redis/redis.conf"
    redis = "{0}/etc/redis/{1}.conf".format(path, persistence)
    cron = "{0}/cron.d/{1}.cron".format(path, persistence)

    # redis will start with this conf
    log('configuring redis', 'info')
    os.system("/bin/cp -f {0} {1}".format(redis, dst))
    if maxmemory > 0:
        os.system("/bin/sed 's/^# maxmemory <bytes>.*$/maxmemory {0}/' -i {1}".format(maxmemory, dst))
    if policy is not None:  # idiom fix: was `policy != None`
        os.system("/bin/sed 's/^# maxmemory-policy.*$/maxmemory-policy {0}/' -i {1}".format(policy, dst))

    # and root's cron will be set accordingly as well
    log('setting up cron', 'info')
    os.system("/bin/sed 's:INSTALLPATH:{0}:' {1} | /usr/bin/crontab".format(path, cron))

    # ok, ready to set up assets like bucket and volume
    # also, if we have a valid mount, we don't do anything
    log('set up persistence', 'info')
    if not os.path.ismount(mount) and persistence != "no":
        log('create bucket {0}'.format(cluster), 'info')
        backup.create_bucket(key, access, cluster)
        try:
            # only try to create one if we have one
            if not snapshot:
                raise Exception('metadata', 'empty snapshot')
            create_device(snapshot)
        except Exception:
            # narrowed from bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer silently swallowed
            try:
                latest = administration.get_latest_snapshot(key, access, cluster)
                create_device(latest)
            except Exception:
                create_device()
        # we have a bucket, and perhaps a device. lets try to restore
        # from rdb, first from metadata later from user_data.
        if rdb:
            log('restore rdb {0}/{1}'.format(cluster, rdb), 'info')
            backup.restore(key, access, cluster, rdb)
        latest = administration.get_latest_RDB(key, access, cluster)
        if latest:
            log('restore rdb {0}/{1}'.format(cluster, latest), 'info')
            backup.restore(key, access, cluster, latest)
def restore():
    """Flask view for the restore page.

    GET: render the page with the organization's networks and the available
    .json backup files from configs/.
    POST: restore the selected backup file onto each selected network, then
    render the page again with the restored networks' names.
    """
    # available backups are the .json files in configs/
    backuplist = [name for name in os.listdir('configs/') if name.endswith('.json')]
    # NOTE: the original also kept an unused running counter `n`; removed.
    networklist = list(dashboard.organizations.getOrganizationNetworks(org_id))
    if request.method == 'GET':
        return render_template('restore.html', title='Restore', tab=1,
                               netdata=networklist, status="Waiting for job",
                               backuplist=backuplist)
    elif request.method == 'POST':
        netslist = request.form.getlist('nets')
        backupfile = request.form.get('backups')
        for network in netslist:
            bk.restore(network, backupfile)
        # resolve the restored network ids to their display names
        namelist = [dashboard.networks.getNetwork(network)['name']
                    for network in netslist]
        return render_template('restore.html', title='Restore', tab=1,
                               netdata=networklist, status="Finished Restore",
                               netslist=namelist, backuplist=backuplist)
def restore_backup(self, fileUpload):
    """Restore application state from an uploaded backup zip.

    Writes the upload to a uniquely-named temporary zip, hands it to
    backup.restore, removes the temporary file, and schedules a restart
    in 3 seconds on success.

    Args:
        fileUpload: uploaded file object exposing `.file.read()`.

    Returns:
        dict: {'response': True} on success, {'response': False} on failure.
    """
    logging.info('Restoring backup from uploaded zip.')
    # microsecond suffix keeps concurrent/temporary files from colliding
    n = datetime.datetime.today().microsecond
    tmp_zip = os.path.join(core.PROG_PATH, 'restore_{}.zip'.format(n))
    try:
        with open(tmp_zip, 'wb') as f:
            # (removed a pointless f.seek(0) — a fresh 'wb' handle is at 0)
            f.write(fileUpload.file.read())
        logging.info('Restore zip temporarily stored as {}.'.format(tmp_zip))
        backup.restore(require_confirm=False, file=tmp_zip)
    except Exception:
        logging.error('Unable to restore backup.', exc_info=True)
        return {'response': False}
    finally:
        # previously the temp zip was only removed on success and leaked on
        # failure; always clean it up
        if os.path.isfile(tmp_zip):
            logging.info('Removing temporary zip {}'.format(tmp_zip))
            os.unlink(tmp_zip)
    threading.Timer(3, core.restart).start()
    return {'response': True}
def restore(tasks):
    """Run the queued restore tasks in order.

    Each task is restored via backup.restore; on the first failure the
    error is logged and processing stops. Completed task ids are reported
    through api.restore_complete and dropped from the front of `tasks`.

    Returns:
        bool: True when every task was completed, False otherwise.
    """
    log.info('Start backup restore.')
    complete = []
    for item in tasks:
        err = backup.restore(item['backup_id'])
        if err:
            # stop at the first failing task; the remainder stays queued
            log.error(err)
            break
        complete.append(item['id'])
    else:
        log.info('All restore tasks are complete.')
    if complete:
        api.restore_complete(complete)
    # drop exactly the tasks that finished from the head of the queue
    del tasks[:len(complete)]
    return not tasks
def restore():
    """Poll the API for pending restore tasks and execute them.

    On a 200 response each task is restored via backup.restore; the first
    failure is logged and stops processing, and the ids of finished tasks
    are acknowledged through api.restore_complete.

    Returns:
        bool: True when the API answered 200, False otherwise.
    """
    status, content = api.check_restore()
    if status != 200:
        return False
    if content:
        log.info("Start backup restore.")
        done = []
        for task in content:
            failure = backup.restore(task["key"], paths=task.get("items"))
            if failure:
                # abort on the first failed restore; remaining tasks untouched
                log.error(failure)
                break
            done.append(task["id"])
        else:
            log.info("All restore tasks are complete.")
        if done:
            api.restore_complete(done)
    return True
def restore():
    """Fetch restore work from the API and carry it out.

    Only acts when api.check_restore answers with HTTP 200. Tasks are
    restored one by one; a failing restore is logged and halts the run.
    Successfully handled task ids are confirmed via api.restore_complete.

    Returns:
        bool: True for a 200 response (even with no tasks), else False.
    """
    status, content = api.check_restore()
    if status != 200:
        return False
    if content:
        log.info('Start backup restore.')
        finished = []
        for entry in content:
            problem = backup.restore(entry['key'], paths=entry.get('items'))
            if problem:
                # first failure ends the batch; later entries stay pending
                log.error(problem)
                break
            finished.append(entry['id'])
        else:
            log.info('All restore tasks are complete.')
        if finished:
            api.restore_complete(finished)
    return True
# elif mode == 'teststuff': # freshstart.remove_db() ####################################### elif mode == 'backup_restore': backup.backup_menu() elif mode == 'full_backup': backup.full_backup() elif mode == 'small_backup': backup.no_data_backup() elif mode == 'do_backup_restore': backup.restore() elif mode == 'display_backup_settings': kodi.openSettings(addon_id, id1=0, id2=0) elif mode == 'read_zip': backup.read_zip(url) elif mode == 'del_backup': backup.ListBackDel() elif mode == 'do_del_backup': backup.DeleteBackup(url) xbmcplugin.endOfDirectory(int(sys.argv[1]))
def execute_update(self):
    '''Update the application in place from the branch's GitHub zip.

    Performs update process:
      - creates a temporary directory to store update files
      - downloads the branch zip from GitHub and extracts it
      - switches the log handler into the update dir for the duration
      - backs up the user's files, overwrites everything with the zip
        contents, then restores the user's files
      - writes the new version hash, merges the temporary log back into
        the original log file, restores the original log handler and
        removes the temporary files

    Returns bool: True on success, False on any failed step.
    '''
    logging.info('Updating from Zip file.')
    os.chdir(core.PROG_PATH)
    update_zip = 'update.zip'
    update_path = 'update'
    new_hash = self.get_newest_hash()
    logging.info('Cleaning up old update files.')
    try:
        # remove leftovers from a previous (possibly failed) update
        if os.path.isfile(update_zip):
            os.remove(update_zip)
        if os.path.isdir(update_path):
            shutil.rmtree(update_path)
        os.mkdir(update_path)
    except Exception as e:
        logging.error('Could not delete old update files.', exc_info=True)
        return False
    logging.info('Creating temporary update log file.')
    formatter = logmodule.Formatter(
        '%(levelname)s %(asctime)s %(name)s.%(funcName)s: %(message)s')
    handler = logmodule.FileHandler(os.path.join(update_path, 'log.txt'), 'a')
    handler.setFormatter(formatter)
    logging.debug('Switching to temporary log handler while updating.')
    # NOTE(review): on the early `return False` paths below the original
    # handler is never switched back — confirm whether that is intended.
    orig_log_handler = self.switch_log(handler)
    logging.info('Downloading latest Zip.')
    zip_url = '{}/archive/{}.zip'.format(core.GIT_URL, self.branch)
    try:
        zip_bytes = Url.open(zip_url, stream=True).content
        with open(update_zip, 'wb') as f:
            f.write(zip_bytes)
        # free the in-memory copy before extraction
        del zip_bytes
    except Exception as e:
        logging.error('Could not download latest Zip.', exc_info=True)
        return False
    logging.info('Extracting Zip to temporary directory.')
    try:
        with zipfile.ZipFile(update_zip) as f:
            f.extractall(update_path)
    except Exception as e:
        logging.error('Could not extract Zip.', exc_info=True)
        return False
    logging.info('Backing up user\'s files.')
    backup.backup(require_confirm=False)
    # reset update status so it doesn't ask us to update again
    core.UPDATE_STATUS = None
    logging.info('Moving update files.')
    # GitHub zips extract into a '<repo>-<branch>' subfolder
    subfolder = 'Watcher3-{}'.format(self.branch)
    update_files_path = os.path.join(update_path, subfolder)
    try:
        files = os.listdir(update_files_path)
        for file in files:
            src = os.path.join(update_files_path, file)
            dst = file
            # replace files/dirs in the working directory wholesale
            if os.path.isfile(src):
                if os.path.isfile(dst):
                    os.remove(dst)
                shutil.copy2(src, dst)
            elif os.path.isdir(src):
                if os.path.isdir(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
    except Exception as e:
        logging.error('Could not move update files.', exc_info=True)
        return False
    logging.info('Restoring user files.')
    backup.restore(require_confirm=False)
    logging.info('Setting new version file.')
    try:
        with open(self.version_file, 'w') as f:
            f.write(new_hash)
    except Exception as e:
        logging.error('Could not update version file.', exc_info=True)
        return False
    logging.info('Merging update log with master.')
    # append the temporary update log to the original log file
    with open(orig_log_handler.baseFilename, 'a') as log:
        with open(os.path.join(update_path, 'log.txt'), 'r') as u_log:
            log.write(u_log.read())
    logging.info('Changing log handler back to original.')
    self.switch_log(orig_log_handler)
    logging.info('Cleaning up temporary files.')
    try:
        shutil.rmtree(update_path)
        os.remove(update_zip)
    except Exception as e:
        logging.error('Could not delete temporary files.', exc_info=True)
        return False
    logging.info('Update successful.')
    return True
# NOTE(review): chunk starts mid-way through the update-file copy loop of an
# older, Python 2 version of execute_update (`except Exception, e:` syntax);
# the enclosing `try:` / `for file in files:` lines are outside this view.
            dst = file
            # replace existing files/dirs in the working directory wholesale
            if os.path.isfile(src):
                if os.path.isfile(dst):
                    os.remove(dst)
                shutil.copy2(src, dst)
            elif os.path.isdir(src):
                if os.path.isdir(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
        except Exception, e:
            logging.error(u'Could not move update files.', exc_info=True)
            return False
        logging.info(u'Restoring user files.')
        backup.restore(require_confirm=False)
        logging.info(u'Setting new version file.')
        try:
            with open(self.version_file, 'w') as f:
                f.write(new_hash)
        except Exception, e:
            logging.error(u'Could not update version file.', exc_info=True)
            return False
        logging.info(u'Merging update log with master.')
        # append the temporary update log to the original log file
        with open(orig_log_handler.baseFilename, 'a') as log:
            with open(os.path.join(update_path, 'log.txt'), 'r') as u_log:
                log.write(u_log.read())
        logging.info(u'Changing log handler back to original.')
# NOTE(review): chunk begins inside the `except` handler of a function whose
# `def` line is outside this view (`e` and `transformation` are bound there),
# and is truncated at the end inside the instruction-validation loop.
        exit(f"Une erreur est survenue désolé bro : {e}")
    # the recovery file is only removed after a fully successful run
    os.remove("./temp/backup.bcp")
    return transformation


if __name__ == '__main__':
    """
    USAGE: python3 brogramme.py :valeur :operations[]
    RECOVERY MODE: python3 brogramme.py
    """
    # with fewer than two CLI args: resume from the backup file if one
    # exists, otherwise exit with a usage message
    if len(sys.argv) <= 2:
        if os.path.exists("./temp/backup.bcp"):
            print("RECOVERY MODE BRO !")
            data = restore()
            result = process(data)
            print("Résultat Final Bro :")
            print(result)
        else:
            exit(
                "Il manque un argument bro c'est nombre initial suivi des instructions"
            )
    else:
        try:
            valeur_client = int(sys.argv[1])
            instructions = sys.argv[2:]
            # only these operation names are accepted
            whitelist = ["add", "sub"]
            for instruction in instructions:
                if instruction not in whitelist:
# Tail of the add-on's `mode` dispatcher — the opening `if mode == ...`
# branches precede this chunk and are outside this view.
elif mode == 'teststuff':
    freshstart.remove_db()
#######################################
elif mode == 'backup_restore':
    # open the backup/restore menu
    backup.backup_menu()
elif mode == 'full_backup':
    backup.full_backup()
elif mode == 'small_backup':
    backup.no_data_backup()
elif mode == 'do_backup_restore':
    backup.restore()
elif mode == 'display_backup_settings':
    kodi.openSettings(addon_id, id1=0, id2=0)
elif mode == 'read_zip':
    backup.read_zip(url)
elif mode == 'del_backup':
    backup.ListBackDel()
elif mode == 'do_del_backup':
    backup.DeleteBackup(url)

# signal Kodi that this plugin invocation's directory listing is done
# (sys.argv[1] is the plugin handle)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def execute_update(self):
    """Update the application in place from the configured git branch's zip.

    Cleans old update artifacts, redirects logging into the temporary
    update directory, downloads and extracts the branch zip, backs up the
    user's files, overwrites the working directory with the zip contents,
    restores the user's files, writes the new version hash, merges the
    temporary log back into the master log and cleans up.

    Returns:
        bool: True on success, False on any failed step.
    """
    os.chdir(core.PROG_PATH)
    update_zip = 'update.zip'
    update_path = 'update'
    new_hash = self.get_newest_hash()
    logging.info('Updating from Zip file.')
    logging.info('Cleaning up old update files.')
    try:
        # remove leftovers from a previous (possibly failed) update
        if os.path.isfile(update_zip):
            os.remove(update_zip)
        if os.path.isdir(update_path):
            shutil.rmtree(update_path)
        os.mkdir(update_path)
    except Exception as e:
        logging.error('Could not delete old update files.', exc_info=True)
        return False
    logging.info('Creating temporary update log file.')
    # NOTE(review): on the early `return False` paths below the original
    # handler is never switched back — confirm whether that is intended.
    orig_log_handler = self.switch_log(new_path=update_path)
    logging.info('Downloading latest Zip.')
    zip_url = '{}/archive/{}.zip'.format(
        core.GIT_URL, core.CONFIG['Server']['gitbranch'])
    try:
        zip_bytes = Url.open(zip_url, stream=True).content
        with open(update_zip, 'wb') as f:
            f.write(zip_bytes)
        # free the in-memory copy before extraction
        del zip_bytes
    except Exception as e:
        logging.error('Could not download latest Zip.', exc_info=True)
        return False
    logging.info('Extracting Zip to temporary directory.')
    try:
        with zipfile.ZipFile(update_zip) as f:
            f.extractall(update_path)
    except Exception as e:
        logging.error('Could not extract Zip.', exc_info=True)
        return False
    logging.info('Backing up user files.')
    backup.backup(require_confirm=False)
    # reset update status so it doesn't ask us to update again
    core.UPDATE_STATUS = None
    logging.info('Moving update files.')
    # GitHub zips extract into a '<repo>-<branch>' subfolder
    subfolder = 'Watcher3-{}'.format(core.CONFIG['Server']['gitbranch'])
    update_files_path = os.path.join(update_path, subfolder)
    try:
        files = os.listdir(update_files_path)
        for file in files:
            src = os.path.join(update_files_path, file)
            dst = file
            # replace files/dirs in the working directory wholesale
            if os.path.isfile(src):
                if os.path.isfile(dst):
                    os.remove(dst)
                shutil.copy2(src, dst)
            elif os.path.isdir(src):
                if os.path.isdir(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
    except Exception as e:
        logging.error('Could not move update files.', exc_info=True)
        return False
    logging.info('Restoring user files.')
    backup.restore(require_confirm=False)
    logging.info('Setting new version file.')
    try:
        with open(self.version_file, 'w') as f:
            f.write(new_hash)
    except Exception as e:
        logging.error('Could not update version file.', exc_info=True)
        return False
    logging.info('Merging update log with master.')
    # append the temporary update log to the original log file
    with open(orig_log_handler.baseFilename, 'a') as log:
        with open(os.path.join(update_path, 'log.txt'), 'r') as u_log:
            log.write(u_log.read())
    logging.info('Changing log handler back to original.')
    self.switch_log(handler=orig_log_handler)
    logging.info('Cleaning up temporary files.')
    try:
        shutil.rmtree(update_path)
        os.remove(update_zip)
    except Exception as e:  # noqa
        logging.error('Could not delete temporary files.', exc_info=True)
        return False
    logging.info('Update successful.')
    return True
# NOTE(review): chunk opens with the tail of a multi-line condition/call
# ending in `file=file)` — its opening line (presumably the first
# `args.subcommand` test) is outside this view.
        file=file):
    if file:
        link.link_file(sub_dir, card_dir, file, max_backup=max_backup)
    else:
        link.link_files(sub_dir, card_dir, max_backup=max_backup)
elif args.subcommand == 'unlink':
    if file:
        link.unlink_file(file)
    else:
        # no single file given: hand the per-file action to the batch
        # runner below
        function = link.unlink_file
elif args.subcommand == 'backup':
    if file:
        backup.backup(file, max_backup=max_backup)
    else:
        function = backup.backup
else:  # elif args.subcommand == 'restore':
    backup.restore(file, args.number)
try:
    if args.batch:
        batch.batch(function, card_dir, max_backup=max_backup)
    elif args.batch_region:
        batch.batch_region(function, base_dir, region, max_backup=max_backup)
    else:
        batch.batch_all(function, base_dir, max_backup=max_backup)
except AttributeError:
    # NOTE(review): presumably reached when no batch action applies
    # (e.g. `function` / an `args` attribute is absent) — confirm which
    # attribute access this is meant to guard.
    pass