def generate_compare_report(self, image):
    """Read the results of an image compare and build a formatted report.

    Walks <image.anchore_imagedir>/compare_output/ and mirrors its layout:
    report[otherImageId][differ][outputfile] -> list of result lines.

    :param image: image object exposing `anchore_imagedir` (compare results on disk)
    :return: nested dict report as described above
    """
    report = {}
    rootdir = os.path.join(image.anchore_imagedir, "compare_output")
    for b in os.listdir(rootdir):
        # listdir entries are unique, so no membership check is needed
        report[b] = {}
        comparedir = os.path.join(rootdir, b)
        for d in os.listdir(comparedir):
            # 'differs.done' is a completion marker, not differ output
            if d == 'differs.done':
                continue
            report[b][d] = {}
            moduledir = os.path.join(comparedir, d)
            for o in os.listdir(moduledir):
                datafile = os.path.join(moduledir, o)
                report[b][d][o] = anchore_utils.read_plainfile_tolist(datafile)
    return report
def generate_compare_report(self, image):
    """Assemble the image-compare report from files under compare_output/.

    Returns report[otherImageId][differ][outputfile] -> list of lines.
    """
    rootdir = image.anchore_imagedir + "/compare_output/"
    report = {}
    for other_id in os.listdir(rootdir):
        comparedir = rootdir + "/" + other_id + "/"
        per_differ = report.setdefault(other_id, {})
        for differ_name in os.listdir(comparedir):
            # skip the completion marker file
            if differ_name == 'differs.done':
                continue
            moduledir = comparedir + "/" + differ_name
            outputs = per_differ.setdefault(differ_name, {})
            for output_name in os.listdir(moduledir):
                datafile = moduledir + "/" + output_name
                outputs[output_name] = anchore_utils.read_plainfile_tolist(datafile)
    return report
def edit_policy_file(self, editpolicy=False, whitelist=False):
    """Open each image's gate policy (or whitelist) in an interactive editor.

    For every image in self.images: write the current data to a temp file,
    launch $EDITOR (falling back to /bin/vi), then save the edited contents
    back through the anchore DB.

    :param editpolicy: when True, edit the per-image gate policy
    :param whitelist: when editpolicy is False, edit the gate whitelist
    :return: True on success, False if the editor or save step failed
    """
    ret = True
    if not editpolicy and not whitelist:
        # nothing to do
        return ret

    for imageId in self.images:
        if editpolicy:
            data = self.anchoreDB.load_gate_policy(imageId)
        else:
            data = self.anchoreDB.load_gate_whitelist(imageId)

        if not data:
            self._logger.info("Cannot find existing data to edit, skipping: " + str(imageId))
            continue

        tmpdir = anchore_utils.make_anchoretmpdir("/tmp")
        try:
            thefile = os.path.join(tmpdir, "anchorepol." + imageId)
            anchore_utils.write_plainfile_fromlist(thefile, data)

            if "EDITOR" in os.environ:
                cmd = os.environ["EDITOR"].split()
                cmd.append(thefile)
                try:
                    subprocess.check_output(cmd, shell=False)
                except Exception:
                    # editor could not be launched or exited non-zero
                    ret = False
            elif os.path.exists("/bin/vi"):
                try:
                    rc = os.system("/bin/vi " + thefile)
                    if rc:
                        ret = False
                except Exception:
                    ret = False
            else:
                self._logger.info("Cannot find editor to use: please set the EDITOR environment variable and try again")
                # BUGFIX: set the failure flag BEFORE breaking; previously
                # 'ret = False' sat after the break and was unreachable, so
                # a missing editor was reported as success.
                ret = False
                break

            newdata = anchore_utils.read_plainfile_tolist(thefile)
            if editpolicy:
                self.anchoreDB.save_gate_policy(imageId, newdata)
            else:
                self.anchoreDB.save_gate_whitelist(imageId, newdata)
        except Exception as err:
            # best-effort per image, but log instead of silently swallowing
            self._logger.error("Failed editing data for image " + str(imageId) + ": " + str(err))
        finally:
            if tmpdir:
                shutil.rmtree(tmpdir)

    return ret
def updatepolicy(self, newpolicyfile):
    """Install the policy from newpolicyfile on every known image.

    :param newpolicyfile: path to a plain-text policy file
    :return: True if all images were updated, False on the first failure
    """
    new_policy = self.read_policy(anchore_utils.read_plainfile_tolist(newpolicyfile))
    for image_id in self.images:
        # only touch images that are actually known
        if image_id not in self.allimages:
            continue
        try:
            self.save_policy(image_id, new_policy)
        except Exception as err:
            self._logger.error("failed to update policy for image (" + image_id + "). bailing out: " + str(err))
            return False
    return True
def generate_gates_report(self, image):
    """Read image gate results and build a formatted report.

    Returns a dict mapping each gate output filename under
    <image.anchore_imagedir>/gates_output/ to its list of lines;
    evaluation (.eval) and help (.help) artifacts are skipped.

    :param image: image object exposing `anchore_imagedir`
    :return: dict of {gate output filename: list of lines}
    """
    report = {}
    analysisdir = image.anchore_imagedir + "/gates_output/"
    for d in os.listdir(analysisdir):
        # suffix test is clearer (and warning-free) compared to the old
        # non-raw regex strings ".*\.eval$" / ".*\.help$"
        if d.endswith(('.eval', '.help')):
            continue
        report[d] = anchore_utils.read_plainfile_tolist('/'.join([analysisdir, d]))
    return report
def generate_analysis_report(self, image):
    """Build the formatted analysis report from on-disk analyzer output.

    Returns report[module][outputfile] -> list of lines, mirroring the
    analyzer_output directory layout.
    """
    analysisdir = image.anchore_imagedir + "/analyzer_output/"
    report = {}
    for module_name in os.listdir(analysisdir):
        moduledir = analysisdir + "/" + module_name
        module_report = report.setdefault(module_name, {})
        for output_name in os.listdir(moduledir):
            datafile = moduledir + "/" + output_name
            module_report[output_name] = anchore_utils.read_plainfile_tolist(datafile)
    return report
def generate_analysis_report(self, image):
    """Collect analyzer output files into a nested report dict.

    report[module][outputfile] -> list of lines read from disk.
    """
    analysisdir = image.anchore_imagedir + "/analyzer_output/"
    report = {}
    for d in os.listdir(analysisdir):
        moduledir = analysisdir + "/" + d
        # one dict per analyzer module, keyed by output filename
        report[d] = dict(
            (o, anchore_utils.read_plainfile_tolist(moduledir + "/" + o))
            for o in os.listdir(moduledir))
    return report
def evaluate_gates_results(self, image):
    """Evaluate an image's gate outputs against its policies.

    Loads per-gate output lines, matches triggered checks to policy actions
    (GO/WARN/STOP), honors the per-image whitelist, persists per-gate
    evaluations and the overall report via anchoreDB, and returns the list
    of evaluation records (the last record carries the FINAL action).
    """
    ret = list()
    # strongest action observed wins: STOP > WARN > GO
    final_gate_action = 'GO'

    policies_whitelist = self.load_whitelist(image)

    if self.policy_override:
        # an explicit override file replaces the stored per-image policy
        policy_data = anchore_utils.read_plainfile_tolist(self.policy_override)
        policies = self.read_policy(policy_data)
    else:
        policies = self.get_image_policies(image)

    for m in policies.keys():
        # each gate output line is "<TRIGGER> <output text>"
        gdata = self.anchoreDB.load_gate_output(image.meta['imageId'], m)
        for l in gdata:
            (k, v) = re.match('(\S*)\s*(.*)', l).group(1, 2)
            if k in policies[m]:
                r = {'imageId': image.meta['imageId'], 'check': m, 'trigger': k, 'output': v, 'action': policies[m][k]['action']}
                # this is where whitelist check should go
                if r not in policies_whitelist['ignore']:
                    if policies[m][k]['action'] == 'STOP':
                        final_gate_action = 'STOP'
                    elif final_gate_action != 'STOP' and policies[m][k]['action'] == 'WARN':
                        final_gate_action = 'WARN'
                    ret.append(r)
                else:
                    # whitelisted, skip evaluation
                    pass

    self.save_whitelist(image, policies_whitelist, ret)

    # append the overall verdict as a synthetic FINAL record
    ret.append({'imageId': image.meta['imageId'], 'check': 'FINAL', 'trigger': 'FINAL', 'output': "", 'action': final_gate_action})

    # clear any stale evaluation output before writing fresh results
    for i in ret:
        self.anchoreDB.del_gate_eval_output(image.meta['imageId'], i['check'])

    # group "<trigger> <action>" lines by check, then persist per gate
    evals = {}
    for i in ret:
        if i['check'] not in evals:
            evals[i['check']] = list()
        evals[i['check']].append(' '.join([i['trigger'], i['action']]))
    for i in evals.keys():
        self.anchoreDB.save_gate_eval_output(image.meta['imageId'], i, evals[i])

    self.anchoreDB.save_gates_eval_report(image.meta['imageId'], ret)
    return (ret)
def generate_gates_report(self, image):
    """Read image gate results and build a formatted report.

    Returns a dict mapping each gate output filename under
    <image.anchore_imagedir>/gates_output/ to its list of lines;
    evaluation (.eval) artifacts are skipped.

    :param image: image object exposing `anchore_imagedir`
    :return: dict of {gate output filename: list of lines}
    """
    report = {}
    analysisdir = image.anchore_imagedir + "/gates_output/"
    for d in os.listdir(analysisdir):
        # raw string avoids the invalid-escape warning of ".*\.eval$";
        # skip evaluation results, only raw gate output goes in the report
        if re.match(r".*\.eval$", d):
            continue
        report[d] = anchore_utils.read_plainfile_tolist('/'.join([analysisdir, d]))
    return report
def generate_gates_report(self, image):
    """Build a formatted gates report from gates_output files.

    Maps every non-.eval filename under gates_output/ to its list of lines.

    :param image: image object exposing `anchore_imagedir`
    :return: dict of {gate output filename: list of lines}
    """
    report = {}
    analysisdir = image.anchore_imagedir + "/gates_output/"
    for d in os.listdir(analysisdir):
        # raw regex string (was ".*\.eval$", an invalid escape on modern
        # Python); .eval files are evaluation results, not gate output
        if re.match(r".*\.eval$", d):
            continue
        report[d] = anchore_utils.read_plainfile_tolist('/'.join([analysisdir, d]))
    return report
def get_image_policies(self, image):
    """Return the effective gate policies for an image.

    Loads the default policy and the image-specific override, merges new
    default checks into the override, and persists the merged result when
    it differs (or when the image had no override at all).
    """
    default_policies = self.read_policy(
        anchore_utils.read_plainfile_tolist(self.default_gatepol))
    image_policies = self.read_policy(
        self.anchoreDB.load_gate_policy(image.meta['imageId']))

    if image_policies and default_policies:
        policies = self.merge_policies(image_policies, default_policies)
        # only write back if the merge actually changed something
        if policies != image_policies:
            self.save_policy(image.meta['imageId'], policies)
    else:
        # no usable override: fall back to the defaults and persist them
        policies = default_policies
        self.save_policy(image.meta['imageId'], policies)

    return policies
def load_gate_output(self, imageId, gate_name):
    """Return the stored output lines for one gate of an image."""
    gate_file = os.path.join(self.imagerootdir, imageId, 'gates_output', gate_name)
    return anchore_utils.read_plainfile_tolist(gate_file)
def load_gate_whitelist(self, imageId):
    """Return the per-image gate whitelist as a list of lines."""
    whitelist_file = os.path.join(self.imagerootdir, imageId, 'anchore_gate.whitelist')
    return anchore_utils.read_plainfile_tolist(whitelist_file)
def load_gate_output(self, imageId, gate_name):
    """Load the saved output lines produced by gate_name for imageId."""
    return anchore_utils.read_plainfile_tolist(
        os.path.join(self.imagerootdir, imageId, 'gates_output', gate_name))
def execute_query(self, imglist, se, params):
    """Run one query script against a list of images and parse its output.

    Writes the image list to a temp file, executes the query via `se`
    (script executor exposing .execute() and .thecmd), reads the produced
    output file(s), and returns [success(bool), cmd, meta(dict)] where
    meta['result'] holds header/rows/counts and meta['warns'] any warnings.

    :param imglist: list of image ids to pass to the query
    :param se: script executor — presumably scripting.ScriptSetExecutor; verify
    :param params: list of extra query parameters appended to the cmdline
    """
    success = True
    datadir = self.config['image_data_store']
    # unique scratch dirs/files so concurrent queries don't collide
    outputdir = '/'.join([self.config['anchore_data_dir'], "querytmp", "query." + str(random.randint(0, 99999999))])
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    imgfile = '/'.join([self.config['anchore_data_dir'], "querytmp", "queryimages." + str(random.randint(0, 99999999))])
    anchore_utils.write_plainfile_fromlist(imgfile, imglist)

    cmdline = ' '.join([imgfile, datadir, outputdir])
    if params:
        cmdline = cmdline + ' ' + ' '.join(params)

    meta = {}
    try:
        (cmd, rc, sout) = se.execute(capture_output=True, cmdline=cmdline)
        if rc:
            self._logger.error("Query command ran but execution failed")
            self._logger.error("Query command: (" + ' '.join([se.thecmd, cmdline]) + ")")
            self._logger.error("Query output: (" + str(sout) + ")")
            self._logger.error("Exit code: (" + str(rc) + ")")
            raise Exception("Query ran but exited non-zero.")
    except Exception as err:
        raise Exception("Query execution failed: " + str(err))
    else:
        # only reached when the query ran and exited zero
        try:
            #outputs = os.listdir(outputdir)
            warnfile = False
            found = False
            # .WARNS files carry warnings; any other file is the result
            # (NOTE(review): only the last non-WARNS file is kept — confirm
            # queries produce a single output file)
            for f in os.listdir(outputdir):
                if re.match(".*\.WARNS", f):
                    warnfile = '/'.join([outputdir, f])
                else:
                    ofile = '/'.join([outputdir, f])
                    found = True
            if not found:
                raise Exception("No output files found after executing query command\n\tCommand Output:\n" + sout + "\n\tInfo: Query command should have produced an output file in: " + outputdir)

            # first row is the header; every data row must match its width
            orows = list()
            try:
                frows = anchore_utils.read_kvfile_tolist(ofile)
                header = frows[0]
                rowlen = len(header)
                for row in frows[1:]:
                    if len(row) != rowlen:
                        raise Exception("Number of columns in data row (" + str(len(row)) + ") is not equal to number of columns in header (" + str(rowlen) + ")\n\tHeader: " + str(header) + "\n\tOffending Row: " + str(row))
                    orows.append(row)
            except Exception as err:
                raise err

            if warnfile:
                # warnings are best-effort; ignore read failures
                try:
                    meta['warns'] = anchore_utils.read_plainfile_tolist(warnfile)
                except:
                    pass

            meta['queryparams'] = ','.join(params)
            meta['querycommand'] = cmd
            # record which column (if any) holds URLs for later rendering
            try:
                i = header.index('URL')
                meta['url_column_index'] = i
            except:
                pass
            meta['result'] = {}
            meta['result']['header'] = header
            meta['result']['rowcount'] = len(orows)
            try:
                #meta['result']['colcount'] = len(orows[0])
                meta['result']['colcount'] = len(header)
            except:
                meta['result']['colcount'] = 0
            meta['result']['rows'] = orows
        except Exception as err:
            self._logger.error("Query output handling failed: ")
            self._logger.error("\tCommand: " + str(cmd))
            self._logger.error("\tException: " + str(err))
            success = False
        finally:
            # always clean up the scratch image file and output dir
            if imgfile and os.path.exists(imgfile):
                os.remove(imgfile)
            if outputdir and os.path.exists(outputdir):
                shutil.rmtree(outputdir)
    ret = [success, cmd, meta]
    return (ret)
def execute_query(self, imglist, se, params):
    """Execute a query script over imglist and collect its tabular output.

    Returns [success(bool), cmd, meta(dict)]; meta['result'] contains the
    parsed header, rows, rowcount and colcount, meta['warns'] any warnings
    read from a *.WARNS file.

    :param imglist: list of image ids written to a temp file for the query
    :param se: script executor — presumably scripting.ScriptSetExecutor; verify
    :param params: extra parameters appended to the query command line
    """
    success = True
    datadir = self.config['image_data_store']
    # randomized scratch paths avoid collisions between concurrent runs
    outputdir = '/'.join([self.config['anchore_data_dir'], "querytmp", "query." + str(random.randint(0, 99999999))])
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)
    imgfile = '/'.join([self.config['anchore_data_dir'], "querytmp", "queryimages." + str(random.randint(0, 99999999))])
    anchore_utils.write_plainfile_fromlist(imgfile, imglist)

    cmdline = ' '.join([imgfile, datadir, outputdir])
    if params:
        cmdline = cmdline + ' ' + ' '.join(params)

    meta = {}
    try:
        (cmd, rc, sout) = se.execute(capture_output=True, cmdline=cmdline)
        if rc:
            # query ran but reported failure: log the full context
            self._logger.error("Query command ran but execution failed")
            self._logger.error("Query command: (" + ' '.join([se.thecmd, cmdline]) + ")")
            self._logger.error("Query output: (" + str(sout) + ")")
            self._logger.error("Exit code: (" + str(rc) + ")")
            raise Exception("Query ran but exited non-zero.")
    except Exception as err:
        raise Exception("Query execution failed: " + str(err))
    else:
        # success path: parse whatever the query left in outputdir
        try:
            #outputs = os.listdir(outputdir)
            warnfile = False
            found = False
            for f in os.listdir(outputdir):
                if re.match(".*\.WARNS", f):
                    warnfile = '/'.join([outputdir, f])
                else:
                    ofile = '/'.join([outputdir, f])
                    found = True
            if not found:
                raise Exception("No output files found after executing query command\n\tCommand Output:\n" + sout + "\n\tInfo: Query command should have produced an output file in: " + outputdir)

            orows = list()
            try:
                # row 0 is the header; reject rows with mismatched width
                frows = anchore_utils.read_kvfile_tolist(ofile)
                header = frows[0]
                rowlen = len(header)
                for row in frows[1:]:
                    if len(row) != rowlen:
                        raise Exception("Number of columns in data row (" + str(len(row)) + ") is not equal to number of columns in header (" + str(rowlen) + ")\n\tHeader: " + str(header) + "\n\tOffending Row: " + str(row))
                    orows.append(row)
            except Exception as err:
                raise err

            if warnfile:
                # best-effort warning collection
                try:
                    meta['warns'] = anchore_utils.read_plainfile_tolist(warnfile)
                except:
                    pass

            meta['queryparams'] = ','.join(params)
            meta['querycommand'] = cmd
            # note the URL column position, if the header declares one
            try:
                i = header.index('URL')
                meta['url_column_index'] = i
            except:
                pass
            meta['result'] = {}
            meta['result']['header'] = header
            meta['result']['rowcount'] = len(orows)
            try:
                #meta['result']['colcount'] = len(orows[0])
                meta['result']['colcount'] = len(header)
            except:
                meta['result']['colcount'] = 0
            meta['result']['rows'] = orows
        except Exception as err:
            self._logger.error("Query output handling failed: ")
            self._logger.error("\tCommand: " + str(cmd))
            self._logger.error("\tException: " + str(err))
            success = False
        finally:
            # scratch files are removed whether parsing succeeded or not
            if imgfile and os.path.exists(imgfile):
                os.remove(imgfile)
            if outputdir and os.path.exists(outputdir):
                shutil.rmtree(outputdir)
    ret = [success, cmd, meta]
    return (ret)
def load_gate_whitelist(self, imageId):
    """Read and return the image's anchore_gate.whitelist contents."""
    return anchore_utils.read_plainfile_tolist(
        os.path.join(self.imagerootdir, imageId, 'anchore_gate.whitelist'))
def execute_gates(self, image, refresh=True):
    """Run all gate scripts against one image and save the gates report.

    Skips work when a gates.done marker exists (unless self.force), builds
    the parameter list from the image's policies, executes the gate script
    set, and persists the generated report on success.

    :param image: image object with `.meta` and `.anchore_imagedir`
    :param refresh: accepted but not referenced in this body — TODO confirm
                    whether callers rely on it
    :return: True if every gate script exited zero, False otherwise
    """
    self._logger.debug("gate policy evaluation for image " + str(image.meta['imagename']) + ": begin")
    success = True
    imagename = image.meta['imageId']
    imagedir = image.anchore_imagedir
    gatesdir = '/'.join([self.config["scripts_dir"], "gates"])
    workingdir = '/'.join([self.config['anchore_data_dir'], 'querytmp'])
    outputdir = workingdir

    # gates.done marks a prior evaluation; honor it unless forced
    if not self.force and os.path.exists(imagedir + "/gates.done"):
        self._logger.info(image.meta['shortId'] + ": evaluated.")
        return (True)

    self._logger.info(image.meta['shortId'] + ": evaluating policies ...")

    for d in [outputdir, workingdir]:
        if not os.path.exists(d):
            os.makedirs(d)

    # gate scripts read the image id from a scratch file
    imgfile = '/'.join([workingdir, "queryimages." + str(random.randint(0, 99999999))])
    anchore_utils.write_plainfile_fromstr(imgfile, image.meta['imageId'])

    if self.policy_override:
        # explicit policy file overrides the stored per-image policy
        policy_data = anchore_utils.read_plainfile_tolist(self.policy_override)
        policies = self.read_policy(policy_data)
    else:
        policies = self.get_image_policies(image)

    # collect every per-trigger 'params' value; default to 'all' if none
    paramlist = list()
    for p in policies.keys():
        for t in policies[p].keys():
            if 'params' in policies[p][t] and policies[p][t]['params']:
                paramlist.append(policies[p][t]['params'])
    if len(paramlist) <= 0:
        paramlist.append('all')

    # user/extra script dirs can override the stock gates
    path_overrides = ['/'.join([self.config['user_scripts_dir'], 'gates'])]
    if self.config['extra_scripts_dir']:
        path_overrides = path_overrides + ['/'.join([self.config['extra_scripts_dir'], 'gates'])]

    results = scripting.ScriptSetExecutor(path=gatesdir, path_overrides=path_overrides).execute(capture_output=True, fail_fast=True, cmdline=' '.join([imgfile, self.config['image_data_store'], outputdir, ' '.join(paramlist)]))

    os.remove(imgfile)

    # any non-zero gate script marks the whole evaluation failed
    for r in results:
        (cmd, retcode, output) = r
        if retcode:
            self._logger.error("FAILED")
            self._logger.error("\tCMD: " + cmd)
            self._logger.error("\tEXITCODE: " + str(retcode))
            self._logger.error("\tOUTPUT: " + output)
            success = False
        else:
            self._logger.debug("")
            self._logger.debug("\tCMD: " + cmd)
            self._logger.debug("\tEXITCODE: " + str(retcode))
            self._logger.debug("\tOUTPUT: " + output)
            self._logger.debug("")

    if success:
        # persist the formatted report only when all gates ran cleanly
        report = self.generate_gates_report(image)
        self.anchoreDB.save_gates_report(image.meta['imageId'], report)
        self._logger.info(image.meta['shortId'] + ": evaluated.")

    self._logger.debug("gate policy evaluation for image " + str(image.meta['imagename']) + ": end")
    return (success)
def evaluate_gates_results(self, image):
    """Evaluate gate outputs against policies, with global/image whitelists.

    For every triggered check: derive a triggerId (md5 of check+trigger+
    output, or the 'id' field when the output is anchore-style JSON), look
    it up in the global and per-image whitelists, and accumulate the final
    GO/WARN/STOP action. Persists per-gate evaluations and the report.

    :return: (ret, fullret) — ret holds only non-whitelisted records plus a
             FINAL record; fullret holds every record annotated with
             'whitelisted' and 'whitelist_type'
    """
    ret = list()
    fullret = list()
    # strongest action observed wins: STOP > WARN > GO
    final_gate_action = 'GO'

    policies_whitelist = self.load_whitelist(image)
    global_whitelist = self.load_global_whitelist()

    if self.policy_override:
        # an explicit override file replaces the stored per-image policy
        policy_data = anchore_utils.read_plainfile_tolist(self.policy_override)
        policies = self.read_policy(policy_data)
    else:
        policies = self.get_image_policies(image)

    for m in policies.keys():
        # each gate output line is "<TRIGGER> <output text>"
        gdata = self.anchoreDB.load_gate_output(image.meta['imageId'], m)
        for l in gdata:
            (k, v) = re.match('(\S*)\s*(.*)', l).group(1, 2)
            imageId = image.meta['imageId']
            check = m
            trigger = k
            output = v
            # default triggerId: digest of the full record text
            triggerId = hashlib.md5(''.join([check, trigger, output])).hexdigest()

            # if the output is structured (i.e. decoded as an
            # anchore compatible json string) then extract the
            # elements for display
            try:
                json_output = json.loads(output)
                if 'id' in json_output:
                    triggerId = str(json_output['id'])
                if 'desc' in json_output:
                    #output = output + " description="+outputdesc
                    output = str(json_output['desc'])
            except:
                pass

            if k in policies[m]:
                trigger = k
                action = policies[check][trigger]['action']
                #r = {'imageId':image.meta['imageId'], 'check':m, 'trigger':k, 'output':v, 'action':policies[m][k]['action']}
                r = {'imageId': imageId, 'check': check, 'triggerId': triggerId, 'trigger': trigger, 'output': output, 'action': action}

                # this is where whitelist check should go
                # global whitelist matches on [check, triggerId]; the image
                # whitelist matches the whole record
                whitelisted = False
                whitelist_type = "none"
                if [m, triggerId] in global_whitelist:
                    whitelisted = True
                    whitelist_type = "global"
                elif r in policies_whitelist['ignore']:
                    whitelisted = True
                    whitelist_type = "image"

                # fullret carries every record, annotated with whitelist info
                fullr = {}
                fullr.update(r)
                fullr['whitelisted'] = whitelisted
                fullr['whitelist_type'] = whitelist_type
                fullret.append(fullr)

                if not whitelisted:
                    if policies[m][k]['action'] == 'STOP':
                        final_gate_action = 'STOP'
                    elif final_gate_action != 'STOP' and policies[m][k]['action'] == 'WARN':
                        final_gate_action = 'WARN'
                    ret.append(r)
                else:
                    # whitelisted, skip evaluation
                    pass

    self.save_whitelist(image, policies_whitelist, ret)

    # append the overall verdict as a synthetic FINAL record to both lists
    ret.append({'imageId': image.meta['imageId'], 'check': 'FINAL', 'trigger': 'FINAL', 'output': "", 'action': final_gate_action})
    fullret.append({'imageId': image.meta['imageId'], 'check': 'FINAL', 'trigger': 'FINAL', 'output': "", 'action': final_gate_action, 'whitelisted': False, 'whitelist_type': "none", 'triggerId': "N/A"})

    # clear stale evaluation output before writing fresh results
    for i in ret:
        self.anchoreDB.del_gate_eval_output(image.meta['imageId'], i['check'])

    # group "<trigger> <action>" lines by check, then persist per gate
    evals = {}
    for i in ret:
        if i['check'] not in evals:
            evals[i['check']] = list()
        evals[i['check']].append(' '.join([i['trigger'], i['action']]))
    for i in evals.keys():
        self.anchoreDB.save_gate_eval_output(image.meta['imageId'], i, evals[i])

    self.anchoreDB.save_gates_eval_report(image.meta['imageId'], ret)
    return (ret, fullret)