def get_redirected(url, del_params=False, timeout=6):
    """Follow redirects for *url* and return the final destination URL.

    Args:
        url: URL to request; a falsy value returns None immediately.
        del_params: If True, strip query parameters from the final URL
            via del_url_params().
        timeout: Per-request timeout in seconds.

    Returns:
        The final URL after redirects (params stripped when requested),
        or None when url is falsy, the request was not redirected, or
        both download attempts failed.
    """
    if not url:
        return None
    try:
        # BUG FIX: timeout was passed positionally, which requests.get()
        # interprets as the `params` argument — it must be a keyword.
        response = requests.get(url, timeout=timeout)
    except requests.RequestException:
        # One retry after a pause — transient network hiccups are common.
        sleep(20)
        try:
            response = requests.get(url, timeout=timeout)
        except requests.RequestException:
            if has_gymail:
                send_mail(
                    event="error",
                    subject=os.path.basename(__file__),
                    message="Download from %s failed. Site possibly down." % url
                )
            log.error("Download from %s failed. Site possibly down." % url)
            return None
    if response.history:
        print("Request was redirected")
        for resp in response.history:
            print(resp.status_code, resp.url)
        print("Final destination:")
        url = response.url
        print(response.status_code, url)
        if del_params:
            url = del_url_params(url)
        return url
    else:
        print("Request was not redirected")
        return None
def main():
    """Udev-triggered entry point for offsite backups.

    Reads the task name (sys.argv[1]) and optional device name (sys.argv[2]):
      - "mount":  mount the device at `mp`.
      - "add":    mount, prepare backup directories, then run all copy jobs;
                  mail an error when the mount fails.
      - "umount": unmount `mp` and mail a success notice.
    """
    task = sys.argv[1]
    dev = "/dev/%s" % sys.argv[2] if len(sys.argv) > 2 else None
    log.info("offsiteudev: task: %s, dev: %s" % (task, dev))
    if task == "mount" and dev:
        mkdir_p(mp)
        if mount(dev=dev, mp=mp):
            log.info("Mounted %s to %s" % (dev, mp))
    elif task == "add" and dev:
        mkdir_p(mp)
        if mount(dev=dev, mp=mp):
            # Hoisted: uid/gid lookup is loop-invariant.
            uid = pwd.getpwnam(chown_user).pw_uid
            gid = grp.getgrnam(chown_group).gr_gid
            for x in backup_dirs:
                x = os.path.join(mp, x)
                mkdir_p(x)
                os.chown(x, uid, gid)
            log.info("Running job now")
            # Plain loop instead of a side-effect list comprehension.
            for x in copy_jobs:
                try_func(run_job, x, ssh_alias)
            log.info("Run jobs: %s" % ", ".join(copy_jobs))
        else:
            msg = "Couldn't mount offsite hdd, thus offsite backup not initiated."
            log.error(msg)
            send_mail(event="error", subject=os.path.basename(__file__), message=msg)
    elif task == "umount":
        umount(mp)
        msg = "Offsite backup completed successfully."
        log.info(msg)
        send_mail(event="info", subject=os.path.basename(__file__), message=msg)
    log.info("Done")
def evaluate_age(self, app_ages, app_config):
    """Mail an error for every host whose youngest backup age exceeds max_age."""
    app, age_dict = app_ages
    limit = app_config["max_age"]
    for host, ages in age_dict.items():
        youngest = min(ages)
        if youngest > limit:
            subject = "(ERROR) %s %s is %s days old" % (app, host, youngest)
            send_mail(event="error", subject=subject, message=subject)
def run(self):
    """Check the age of the newest successful backup for each Bacula job.

    Filters the director's configured jobs by CONF("job_excludes") and
    CONF("job_patterns"), queries the catalog DB for each remaining job's
    newest real end time, and prints a warning (and mails, unless dry_run)
    for every job older than CONF("max_days").
    """
    job_conf = self.dir_conf.get('Job', None)
    job_excludes = CONF("job_excludes")
    # Config sanity check only — execution continues either way.
    if isinstance(job_excludes, str) or isinstance(job_excludes, int):
        print("Config error: job_excludes has to be a list.")
    if job_excludes:
        for job in job_excludes:
            job_conf.pop(job, None)
    jobs = list(job_conf)
    job_patterns = CONF("job_patterns")
    if isinstance(job_patterns, str) or isinstance(job_patterns, int):
        print("Config error: job_patterns has to be a list.")
    if job_patterns and jobs:
        # Combine all patterns into a single alternation regex; any job
        # matching one of them is dropped.
        job_patterns = "(" + ")|(".join(job_patterns) + ")"
        jobs = [job for job in jobs if not re.match(job_patterns, job)]
    # j.jobid, j.name, m.volumename, s.name
    # NOTE(review): formatting tuple(jobs) into the SQL produces invalid
    # syntax when only one job remains (trailing comma: "('x',)"), and
    # string-built SQL is injection-prone if job names are untrusted —
    # consider parameterized queries. TODO confirm.
    query = """
    SELECT DISTINCT j.name, max(j.realendtime)
    FROM job j, media m, jobmedia jm, storage s
    WHERE m.mediaid=jm.mediaid
    AND j.jobid=jm.jobid
    AND s.storageid=m.storageid
    AND j.name IN {0}
    AND j.jobstatus IN ('T', 'W')
    AND j.level IN ('F', 'I', 'D')
    AND j.type IN ('B', 'C', 'c')
    GROUP BY j.name;
    """.format(tuple(jobs))
    try:
        con = psycopg2.connect(database=db_name, user=db_user,
                               host=db_host, password=db_password)
        cur = con.cursor()
        cur.execute(query)
        res = cur.fetchall()
    except Exception as e:
        print(format_exception(e))
        return
    for job in res:
        name = job[0]
        realendtime = job[1]
        # Age in whole days since the job's newest successful run.
        age = (datetime.datetime.now() - realendtime).days
        if age > CONF("max_days"):
            print(
                "Warning: Backup job '{0}' is older than {2} days. Last backup is from "
                "{1}.".format(name, realendtime, CONF("max_days")))
            if not self.dry_run:
                # NOTE(review): positional send_mail("Error", ...) differs
                # from the keyword event="error" style used elsewhere in
                # this file — confirm both call forms are supported.
                send_mail(
                    "Error",
                    "HIGH: Backup older than %s days" % CONF("max_days"),
                    "Backup job '{0}' is older than {2} days. Last backup is from "
                    "{1}. Check the backup-server for errors ASAP."
                    "".format(name, realendtime, CONF("max_days")))
        else:
            print("OK: Backup job '{0}' is fine. Last backup is from "
                  "{1}.".format(name, realendtime))
def main():
    """Verify services are running and the newest offsite backup is fresh.

    Mails an error when the newest offsite backup is older than
    max_offsite_days; otherwise prints a status line.
    """
    systemd_services_up(services)
    offsite_ts = newest_offsite_backup()
    if offsite_ts:
        current_ts = int(time.time())
        # BUG FIX: age is current time minus backup time; the operands were
        # reversed, yielding a negative age that could never exceed the limit.
        offsite_days = (current_ts - offsite_ts) / (60 * 60 * 24)
        if offsite_days > max_offsite_days:
            # BUG FIX: one %s placeholder with two arguments raised TypeError
            # instead of sending the alert mail.
            msg = "Offsite backups on %s:%s are too old" % (host, mp)
            send_mail(event="error", subject=os.path.basename(__file__), message=msg)
        else:
            # BUG FIX: two %s placeholders were fed a single bare argument,
            # raising TypeError on the success path.
            print("Last copy job from %s is younger than %s days"
                  % (mp, max_offsite_days))
    else:
        print("No copy backup found")
def run():
    """Verify services are running and the newest offsite backup is fresh.

    Mails an error when the newest offsite backup is older than
    CONF('MAX_OFFSITE_AGE_DAYS'); otherwise prints a status line.
    """
    systemd_services_up(services)
    offsite_ts = newest_offsite_backup()
    if offsite_ts:
        current_ts = int(time.time())
        # BUG FIX: age is current time minus backup time; the operands were
        # reversed, yielding a negative age that could never exceed the limit.
        offsite_days = (current_ts - offsite_ts) / (60 * 60 * 24)
        if offsite_days > CONF('MAX_OFFSITE_AGE_DAYS'):
            # BUG FIX: one %s placeholder with two arguments raised TypeError
            # instead of sending the alert mail.
            msg = "Offsite backups on %s:%s are too old" % (host, mp)
            send_mail(event="error", subject=os.path.basename(__file__), message=msg)
        else:
            # BUG FIX: two %s placeholders were fed a single bare argument
            # (and the undefined name max_offsite_days — this variant reads
            # its limit from CONF), raising on the success path.
            print("Last copy job from %s is younger than %s days"
                  % (mp, CONF('MAX_OFFSITE_AGE_DAYS')))
    else:
        print("No copy backup found")
def run():
    """Warn (and mail) for every monitored disk above its usage limit.

    Disks reported by mounted_disks() get `default_limit`; entries in
    CONF('CUSTOM_DISKS') — (mountpoint, limit) pairs — override it.
    """
    custom_disks = CONF('CUSTOM_DISKS')
    # Hoisted out of the comprehension: set membership instead of a
    # rebuilt O(n) list per disk.
    custom_paths = {y[0] for y in custom_disks}
    default_disks = [(x, default_limit) for x in mounted_disks()
                     if x not in custom_paths]
    for path, limit in default_disks + custom_disks:
        df = subprocess.Popen(["df", path], stdout=subprocess.PIPE)
        output = df.communicate()[0]
        # df output, second line: filesystem blocks used available use% mountpoint
        fs, blocks, used, available, percent, mountpoint = \
            output.decode("utf-8").split("\n")[1].split()
        used_percent = int(percent.strip("%"))
        if used_percent > limit:
            # BUG FIX: typo in user-visible message ("notifcation").
            msg = "%s\tWarning: %s%% used (limit: %s%%). Sending notification mail" % (path, used_percent, limit)
            print(msg)
            send_mail(event="error", subject=os.path.basename(__file__), message=msg)
        else:
            print("%s\tOkay: %s%% used (limit: %s%%)" % (path, used_percent, limit))
def my_ip(proxies=None, timeout=6):
    """Return this host's public IP as reported by myip.dnsomatic.com.

    Args:
        proxies: Optional requests-style proxies dict (e.g. Tor socks5).
        timeout: Request timeout in seconds.

    Returns:
        The response body (the IP address) as text, or None when the
        request fails.
    """
    kwargs = {"proxies": proxies} if proxies else {}
    url = 'http://myip.dnsomatic.com'
    try:
        # BUG FIX: timeout was passed positionally, which requests.get()
        # interprets as the `params` argument — it must be a keyword.
        r = requests.get(url, timeout=timeout, **kwargs)
    except requests.RequestException:
        if has_gymail:
            send_mail(
                event="error",
                subject=os.path.basename(__file__),
                message="Download from %s failed. Site possibly down." % url
            )
        log.error("Download from %s failed. Site possibly down." % url)
        return None
    return r.text
async def uptime_eval(self):
    """Check uptime for all hosts"""
    # Runs forever: once per second, inspect every tracked host and send
    # alert mails — rate-limited by DELAY_NOTIFY — for hosts that are down
    # or whose backups look too old.
    while True:
        await asyncio.sleep(1)
        for host, data in self.hosts.items():
            now = time.time()
            # Seed missing bookkeeping values. "notified" is pushed just past
            # the DELAY_NOTIFY threshold so the first evaluation can fire.
            if not data["notified"]:
                data["notified"] = CONF('DELAY_NOTIFY') + 1
            # NOTE(review): seeding "uptime" to DOWNTIME + 1 + now makes
            # (now - uptime) negative, suppressing the down-alert until a
            # real uptime value is recorded — confirm that is the intent.
            if not data["uptime"]:
                data["uptime"] = CONF('DOWNTIME') + 1 + now
            # Only notify again once DELAY_NOTIFY seconds have passed since
            # the last notification for this host.
            if now - data["notified"] > CONF('DELAY_NOTIFY'):
                if now - data["uptime"] > CONF('DOWNTIME'):
                    msg = "%s hasn't responded since %s seconds" % (host, CONF('DOWNTIME'))
                    log.error(msg)
                    send_mail("error", "Host %s is down" % host, msg)
                    data["notified"] = time.time()
                if data["last_backup"]:
                    # NOTE(review): "last_backup" is compared directly against
                    # a day count; the commented-out "* (24*60*60)" suggests it
                    # may once have been a seconds delta — confirm the units.
                    if data["last_backup"] > CONF('LAST_BACKUP_MAX_DAYS'):  ## * (24*60*60):
                        msg = "Last backup is older than %s days for director host %s." % (
                            CONF('LAST_BACKUP_MAX_DAYS'), host
                        )
                        log.error(msg)
                        send_mail("error", "Backup too old for host %s" % host, msg)
                        data["notified"] = time.time()
                if data["monitor_jobs"]:
                    # Per-job backup-age checks for explicitly monitored jobs.
                    for job_name, job_last_backup in data["monitor_jobs"].items():
                        if job_last_backup > CONF('LAST_BACKUP_MAX_DAYS'):  ## * (24*60*60):
                            msg = """\
The backup job %s is older than %s days for the director host %s.\
""" % (job_name, CONF('LAST_BACKUP_MAX_DAYS'), host)
                            log.error(msg)
                            send_mail("error", "Backup too old for host %s" % host, msg)
                            data["notified"] = time.time()
def main(self):
    """Run every configured app check and its configured evaluation method.

    For each app in the loaded config that matches a registered app method,
    call it to collect ages, then dispatch the result to the registered
    evaluation method. Failures in either step are reported by mail.
    """
    self.get_config()
    for app, app_config in self.conf.items():
        if app in self.registered_app():
            print("(RUN APP) %s" % app)
            _app = getattr(self, app)
            try:
                app_ages = _app(app_config)
            except Exception as e:
                sbj = "(ERROR) %s failed" % (app)
                msg = "Exception message: %s" % format_exception(e)
                send_mail(event="error", subject=sbj, message=msg)
                # BUG FIX: without this, app_ages is unbound after a failed
                # app call and the evaluation below raised NameError.
                continue
            if app_config["evaluate_method"] in self.registered_evaluation():
                print("(RUN EVAL) %s" % app_config["evaluate_method"])
                evaluate_method = getattr(
                    self, self.registered_evaluation().get(
                        app_config["evaluate_method"]))
                try:
                    evaluate_method(app_ages, app_config)
                except Exception as e:
                    sbj = "(ERROR) %s failed" % app_config["evaluate_method"]
                    msg = "Exception message: %s" % format_exception(e)
                    send_mail(event="error", subject=sbj, message=msg)
def download(filename, download_link, timeout=6, proxies=None, tor=False, display_ip=False):
    """Download `download_link` to the local path `filename`.

    Args:
        filename: Local path the payload is written to (binary mode).
        download_link: URL to fetch.
        timeout: Request timeout in seconds. Nearly all production code
            should use this parameter in nearly all requests; failure to
            do so can cause your program to hang indefinitely.
        proxies: Optional requests-style proxies dict.
        tor: Route the request through the module's Tor proxies instead.
        display_ip: With `proxies`, also log the current public IP.

    Returns:
        The response text when non-empty, True for an empty body, or
        False when the request failed or the response was not OK.
    """
    with open(filename, 'wb') as f:
        kwargs = {}
        if tor:
            log.debug("requests with tor enabled, using requests >=2.10 with socks5 support.")
            log.debug("Current IP: %s" % my_ip(proxies=tor_proxies))
            kwargs = {"proxies": tor_proxies}
        elif proxies:
            log.debug("Using custom proxies.")
            if display_ip:
                log.debug("Current IP: %s" % my_ip(proxies=proxies))
            kwargs = {"proxies": proxies}
        # Preventing exceptions when it times out due to temporary network
        # issues — the request itself raises during network problems.
        try:
            log.debug("Starting HTTP request to %s" % download_link)
            response = requests.get(download_link, timeout=timeout, stream=True, **kwargs)
        except requests.RequestException:
            if has_gymail:
                send_mail(event="error", subject=os.path.basename(__file__),
                          message="Download from %s failed. Site possibly down." % download_link)
            log.error("Download from %s failed. Site possibly down." % download_link)
            return False
        try:
            if not response.ok:
                if has_gymail:
                    send_mail(event="error", subject=os.path.basename(__file__),
                              message="Download from %s failed. Site possibly down." % download_link)
                # BUG FIX: the % argument was missing here, so the log line
                # contained a literal "%s" instead of the URL.
                log.error("Download from %s failed. Site possibly down." % download_link)
                return False
            # .text consumes and caches the body; iter_content then replays
            # the cached content into the file.
            response_text = response.text
            for block in response.iter_content(1024):
                f.write(block)
        except Exception:
            if has_gymail:
                send_mail(event="error", subject=os.path.basename(__file__),
                          message="Download from %s failed. Site possibly down." % download_link)
            log.error("Download from %s failed. Site possibly down." % download_link)
            return False
    if response_text:
        return response_text
    else:
        return True