def processDexLoad(self, fileName, source, output, stack):
    """Process a dex/jar load event reported from the device.

    Renames the loaded file by content hash + per-hash load count, records
    it, and either links it to a statically detected dexload path (analysing
    the file on its first load) or flags it as a suspicious dexload.

    NOTE(review): `source` and `output` are not used here — presumably kept
    for the message-handler signature; confirm against the dispatcher.
    """
    logger.debug("Processing dex load message...")
    file_hash = getSha256(fileName)
    # per-hash load counter; the first load of a given hash yields count == 1
    file_load_count = self._loaded_files_count.setdefault(file_hash, 0) + 1
    newFilePath = self._rename_source_file(fileName, file_hash, str(file_load_count))
    self._loaded_files_count[file_hash] = file_load_count
    self._codeFiles[newFilePath] = file_hash
    dexloadPathFromStack = self._getDexLoadPathFromStack(stack)
    if dexloadPathFromStack:
        # (src, through) pair of the dexload call site found in the stack
        srcFromStack = dexloadPathFromStack[0]
        throughMethod = dexloadPathFromStack[1]
        # this statically-found path is now covered at runtime
        if dexloadPathFromStack in self._uncovered_dexload:
            self._uncovered_dexload.remove(dexloadPathFromStack)
        self._addDexloadPathToMCG(srcFromStack, throughMethod, newFilePath)
        #we do analyse files if appropriate dex load calls have found in sources of application
        #and if we have not analysed file yet
        if file_load_count > 1:
            logger.info("File [%s] with hash [%s] is loaded for the [%d]th time! Skipping its analysis!" % (newFilePath, file_hash, file_load_count))
        else:
            self.makeFileAnalysis(newFilePath)
        return
    #if the stack does not contain a dexload method detected in the sources
    self._addSuspiciousDexload(newFilePath, stack)
    logger.debug("Dex load message processed!")
def processInvoke(self, cls, method, prototype, stack):
    """Process a reflective Method.invoke() event reported by the client.

    cls/method/prototype identify the invoked target; stack is the client's
    call stack.  A stack source matching a statically detected invoke path
    is added to the MCG (and marked covered); anything unmatched is recorded
    as suspicious.
    """
    logger.debug("Processing method invoke: [%s %s %s]..." % (cls, method, prototype))
    invokeDstFromClient = (cls, method, prototype)
    invokePosInStack = self._findFirstInvokePos(stack)
    # BUG FIX: the original indexed stack[invokePosInStack] *before* checking
    # for -1, so a miss silently used stack[-1] (the last frame) as the
    # "through" method. Check the sentinel first.
    if invokePosInStack == -1:
        logger.info("Cannot find the first occurrence of invoke method in the stack! Adding method to suspicious list!")
        self._addSuspiciousInvoke(None, invokeDstFromClient, stack)
        return
    throughMethod = stack[invokePosInStack]
    invokeSrcFromStack = stack[invokePosInStack + 1]
    for invokePathFromSources in self._sources_invoke:
        invokeSrcFromSources = invokePathFromSources[0]
        if invokeSrcFromSources != invokeSrcFromStack:
            continue
        self._addInvokePathToMCG(invokeSrcFromSources, throughMethod, invokeDstFromClient)
        # the static path is now covered at runtime
        if invokePathFromSources in self._uncovered_invoke:
            self._uncovered_invoke.remove(invokePathFromSources)
        return
    # no statically detected invoke source matched the stack
    self._addSuspiciousInvoke(throughMethod, invokeDstFromClient, stack)
def runCommand(cmd, timeout_time=None, retry_count=3, return_output=True, stdin_input=None):
    """Spawn and retry a subprocess to run the given shell command.

    Args:
      cmd: shell command to run
      timeout_time: seconds to wait for the command before aborting
      retry_count: number of times to retry on a response timeout
      return_output: if True return the command output as a string,
        otherwise direct it to stdout
      stdin_input: data to feed to stdin

    Returns:
      output of the command

    Raises:
      WaitForResponseTimedOutError: when all retries are exhausted.
    """
    attempts_left = retry_count
    while True:
        try:
            # success: hand the result straight back
            return runOnce(cmd, timeout_time=timeout_time,
                           return_output=return_output, stdin_input=stdin_input)
        except WaitForResponseTimedOutError:
            if attempts_left == 0:
                raise
            attempts_left -= 1
            logger.info("No response for %s, retrying" % cmd)
def populate(self):
    """Populate this instance from the Azure IMDS endpoint (best effort).

    Fetches the instance document and an access token from the URLs in the
    'imds' config section. On any failure the error is logged (with
    traceback) and the partially populated instance is still returned.

    Returns:
      self, for call chaining.
    """
    logger.info("Populating Current VM instance ....")
    try:
        imds_url = config.get('imds', 'imds_url')
        response = requests.get(imds_url, headers={"Metadata": "true"})
        response_txt = json.loads(response.text)
        # required instance fields from the IMDS document
        self.vmId = response_txt['vmId']
        self.name = response_txt['name']
        self.location = response_txt['location']
        self.subscriptionId = response_txt['subscriptionId']
        self.vmScaleSetName = response_txt['vmScaleSetName']
        self.resourceGroupName = response_txt['resourceGroupName']
        self.tags = response_txt['tags']
        # access token for later ARM calls
        accesstoken_url = config.get('imds', 'accesstoken_url')
        access_token_response = requests.get(accesstoken_url, headers={"Metadata": "true"})
        access_token_text = json.loads(access_token_response.text)
        self.access_token = access_token_text['access_token']
        logger.info("Returning populated VMInstance")
    except Exception:
        # BUG FIX: was a bare `except:` (even caught KeyboardInterrupt) and
        # logged no traceback; keep the best-effort contract but log details.
        logger.exception("Error populating vm instance")
    return self
def addInvokePath(self, src, through, dst):
    """Add a reflective invoke path src -> through -> dst to the graph.

    src/through/dst are (class_name, method_name, descriptor) triples.
    The reflective call-site node (keyed by src+through) must already exist
    from static analysis; a method node for dst is created/fetched and
    linked, and if dst is a permission-protected API an extra
    fake-permission node is attached to it.
    """
    src_class_name, src_method_name, src_descriptor = src
    dst_class_name, dst_method_name, dst_descriptor = dst
    through_class_name, through_method_name, through_descriptor = through
    # key of the invoke node created during static analysis
    key = "%s %s %s %s %s %s %s" % (src_class_name, src_method_name, src_descriptor, through_class_name, through_method_name, through_descriptor, POSTFIX_REFL_INVOKE)
    n1 = self._get_existed_node(key)
    if n1 == None:
        logger.warning("Something wrong has happened! Could not find invoke Node in Graph with key [%s]" % str(key))
        return
    n2 = self._get_node(NODE_METHOD, (dst_class_name, dst_method_name, dst_descriptor))
    n2.set_attribute(ATTR_CLASS_NAME, dst_class_name)
    n2.set_attribute(ATTR_METHOD_NAME, dst_method_name)
    n2.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
    self.G.add_edge(n1.id, n2.id)
    #check if called method calls protected feature
    data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
    if data in DVM_PERMISSIONS_BY_API_CALLS:
        logger.info("BINGOOOOOOO! The protected method is called through reflection!")
        perm = DVM_PERMISSIONS_BY_API_CALLS[ data ]
        # permission node is keyed by through+dst+permission name
        key1 = "%s %s %s %s %s %s %s %s" % (through_class_name, through_method_name, through_descriptor, dst_class_name, dst_method_name, dst_descriptor, POSTFIX_PERM, perm)
        n3 = self._get_node(NODE_FAKE_PERMISSION, key1, perm, False)
        n3.set_attribute(ATTR_CLASS_NAME, dst_class_name)
        n3.set_attribute(ATTR_METHOD_NAME, dst_method_name)
        n3.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
        n3.set_attribute(ATTR_PERM_NAME, perm)
        n3.set_attribute(ATTR_PERM_LEVEL, MANIFEST_PERMISSIONS[ perm ][0])
        self.G.add_edge(n2.id, n3.id)
def checkExit(secconAnalyser, stopIfAllSuspiciousMethodsAreAnalysed=False):
    """Check whether analysis is complete and optionally force an exit.

    When the analyser has no suspicious methods left, log that fact; if
    `stopIfAllSuspiciousMethodsAreAnalysed` is set, raise KeyboardInterrupt
    to unwind the main loop.
    """
    if secconAnalyser.containMethodsToAnalyse():
        return
    logger.info("All suspicious methods are analysed! We can exit!")
    if stopIfAllSuspiciousMethodsAreAnalysed:
        logger.info("Generating KeyboardInterrupt to exit!")
        raise KeyboardInterrupt
def processDexLoad(self, fileName, source, output, stack):
    """Process a dex/jar load event reported from the device.

    Renames the loaded file by content hash + per-hash load count, records
    it, and either links it to a statically detected dexload path (analysing
    the file on its first load) or flags it as a suspicious dexload.

    NOTE(review): `source` and `output` are not used here — presumably kept
    for the message-handler signature; confirm against the dispatcher.
    """
    logger.debug("Processing dex load message...")
    file_hash = getSha256(fileName)
    # per-hash load counter; the first load of a given hash yields count == 1
    file_load_count = self._loaded_files_count.setdefault(file_hash, 0) + 1
    newFilePath = self._rename_source_file(fileName, file_hash, str(file_load_count))
    self._loaded_files_count[file_hash] = file_load_count
    self._codeFiles[newFilePath] = file_hash
    dexloadPathFromStack = self._getDexLoadPathFromStack(stack)
    if dexloadPathFromStack:
        # (src, through) pair of the dexload call site found in the stack
        srcFromStack = dexloadPathFromStack[0]
        throughMethod = dexloadPathFromStack[1]
        # this statically-found path is now covered at runtime
        if dexloadPathFromStack in self._uncovered_dexload:
            self._uncovered_dexload.remove(dexloadPathFromStack)
        self._addDexloadPathToMCG(srcFromStack, throughMethod, newFilePath)
        #we do analyse files if appropriate dex load calls have found in sources of application
        #and if we have not analysed file yet
        if file_load_count > 1:
            logger.info(
                "File [%s] with hash [%s] is loaded for the [%d]th time! Skipping its analysis!"
                % (newFilePath, file_hash, file_load_count))
        else:
            self.makeFileAnalysis(newFilePath)
        return
    #if the stack does not contain a dexload method detected in the sources
    self._addSuspiciousDexload(newFilePath, stack)
    logger.debug("Dex load message processed!")
def processInvoke(self, cls, method, prototype, stack):
    """Process a reflective Method.invoke() event reported by the client.

    cls/method/prototype identify the invoked target; stack is the client's
    call stack.  A stack source matching a statically detected invoke path
    is added to the MCG (and marked covered); anything unmatched is recorded
    as suspicious.
    """
    logger.debug("Processing method invoke: [%s %s %s]..." % (cls, method, prototype))
    invokeDstFromClient = (cls, method, prototype)
    invokePosInStack = self._findFirstInvokePos(stack)
    # BUG FIX: the original indexed stack[invokePosInStack] *before* checking
    # for -1, so a miss silently used stack[-1] (the last frame) as the
    # "through" method. Check the sentinel first.
    if invokePosInStack == -1:
        logger.info("Cannot find the first occurrence of invoke method in the stack! Adding method to suspicious list!")
        self._addSuspiciousInvoke(None, invokeDstFromClient, stack)
        return
    throughMethod = stack[invokePosInStack]
    invokeSrcFromStack = stack[invokePosInStack + 1]
    for invokePathFromSources in self._sources_invoke:
        invokeSrcFromSources = invokePathFromSources[0]
        if invokeSrcFromSources != invokeSrcFromStack:
            continue
        self._addInvokePathToMCG(invokeSrcFromSources, throughMethod, invokeDstFromClient)
        # the static path is now covered at runtime
        if invokePathFromSources in self._uncovered_invoke:
            self._uncovered_invoke.remove(invokePathFromSources)
        return
    # no statically detected invoke source matched the stack
    self._addSuspiciousInvoke(throughMethod, invokeDstFromClient, stack)
def mount_volume(path, password):
    """Mount the truecrypt volume at *path* non-interactively.

    Args:
      path: path to the volume file/device.
      password: volume password; None aborts with an error log.

    Returns:
      True when the truecrypt process exits with status 0, else False.
    """
    logger.info("mounting %s with pw %s" % (path, "****"))
    # idiom fix: test None by identity, not equality
    if password is None:
        logger.error("cannot mount volume, missing password")
        return False
    command = ["truecrypt", "-t", "--non-interactive",
               "--fs-options=user,umask=000", "--mount", path, "-p", password]
    logger.debug(" ".join(command))
    return subprocess.call(command) == 0
def process_IN_DELETE(self, event):
    """Handle an inotify DELETE event.

    Fires the handler when the deleted entry is the watched path; when only
    the basename matches, re-evaluate which directory to watch.
    """
    deleted_path = os.path.join(event.path, event.name)
    logger.debug("DELETED: %s" % deleted_path)
    if deleted_path == self.fullname:
        self.handler.on_delete(deleted_path)
    elif event.name == self.filename:
        logger.info("matched file-DELETED: %s" % deleted_path)
        self.recalcWatchDir()
def findDestDirectory(base, entries):
    """Return the name of the first entry whose regex matches *base*.

    Args:
      base: string to match against.
      entries: mapping of name -> compiled regex.

    Returns:
      The matching entry name, or None when nothing matches.
    """
    logger.info("finding dest-directory for " + base)
    logger.debug(entries)
    for entry_name, pattern in entries.items():
        if pattern.search(base) is not None:
            logger.info("found match for " + base + ": " + entry_name)
            return entry_name
def on_create(self, path):
    """Mount a newly created volume, prompting for a password if needed."""
    password = keychain.get_password(path)
    if password is None:
        # BUG FIX: the original fell through after prompting, then called
        # mount_volume(path, None) — which always fails — and prompted a
        # second time. Prompt once and stop.
        ask_password_and_mount(path)
        return
    if mount_volume(path, password):
        logger.info("mounted path %s" % path)
    else:
        # stored password no longer works: fall back to asking the user
        ask_password_and_mount(path)
def start_watching_disk_file(filename, handler):
    """Start an inotify watcher for *filename* and register it globally.

    If the file already exists when watching begins, fire the handler's
    on_create immediately so it is not missed.
    """
    logger.info("start watching %s" % filename)
    watch_manager = WatchManager()
    notifier = ThreadedNotifier(watch_manager, PTmp(watch_manager, filename, handler))
    notifier.start()
    notifiers[filename] = notifier
    if os.path.exists(filename):
        handler.on_create(filename)
def _addDexloadPathToMCG(self, src, through, filename):
    """Record the dexload path src -> through -> filename in the MCG once."""
    logger.debug("Adding dexload method path to our graph...")
    path = (src, through, filename)
    if path in self._covered_dexload:
        logger.info("The path [%s] -- [%s] through [%s] for dexload is already in our graph!" % (str(src), str(filename), str(through)))
        return
    self._covered_dexload.append(path)
    self._stadynaMcg.addDexloadPath(src, through, filename)
    logger.info("The path [%s] -- [%s] through [%s] for dexload is added to our graph!" % (str(src), str(filename), str(through)))
def _addNewInstancePathToMCG(self, src, through, dst):
    """Record the newInstance path src -> through -> dst in the MCG once."""
    logger.debug("Adding newInstance method path to our graph...")
    path = (src, through, dst)
    if path in self._covered_newInstance:
        logger.info("The path [%s] -- [%s] through [%s] for newInstance method is already in our graph!" % (str(src), str(dst), str(through)))
        return
    self._covered_newInstance.append(path)
    self._stadynaMcg.addNewInstancePath(src, through, dst)
    logger.info("The path [%s] -- [%s] through [%s] for newInstance method is added to our graph!" % (str(src), str(dst), str(through)))
def checkOutputPath(dst):
    """Ensure *dst* exists as a directory, creating it when missing.

    Returns:
      False when *dst* exists but is not a directory, True otherwise.
    """
    if not os.path.exists(dst):
        logger.info("The path [%s] does not exist! Creating it!" % dst)
        os.makedirs(dst)
        return True
    if not os.path.isdir(dst):
        logger.error("[%s] is not a directory!" % dst)
        return False
    return True
def failLoadBalancerProbes():
    """Kill the health-probe process so load-balancer probes start failing."""
    logger.info("Failing Health Probes")
    try:
        kill_health_probe = config.get('shell-commands', 'kill_health_probe_process')
        exit_code = os.system(kill_health_probe)
        # BUG FIX: was `is not 0` — identity comparison on an int is
        # unreliable (and a SyntaxWarning on modern Python); compare values.
        if exit_code != 0:
            logger.error("Error killing health probe")
    except Exception:
        # was a bare except; log the traceback but keep the best-effort flow
        logger.exception("Error in failing health probe")
def deleteVMFromVMSS():
    """Issue a DELETE against the VMSS instance endpoint for this VM.

    The URL template comes from the 'vmss' config section and is filled
    with this instance's IMDS metadata; auth uses the IMDS bearer token.
    """
    logger.info("Deleting the VM from VMSS")
    url_template = config.get('vmss', 'vm_delete_url')
    formatted_url = url_template.format(
        subscriptionId=vmInstance.subscriptionId,
        resourceGroupName=vmInstance.resourceGroupName,
        vmScaleSetName=vmInstance.vmScaleSetName,
        instanceId=vmInstance.vmId)
    logger.info("The Delete URL is - " + formatted_url)
    requests.delete(formatted_url, data={}, auth=BearerAuth(vmInstance.access_token))
def main():
    """Entry point: register a truecrypt device file and mount it once.

    Usage: <prog> <device-file>; prompts for the volume password.
    """
    args = sys.argv
    if len(args) < 2:
        print("usage: ", args[0], "<device-file>")
        return
    # first run: create an empty config
    if not os.path.exists(configfile):
        logger.info("creating configfile {}".format(configfile))
        write_json_file(configfile, dict(truecryptvolumes=[]))
    device_file = args[1]
    initial_mount(device_file, getpass.getpass())
    logger.info(find_unique_dev_path(device_file))
def main():
    # Load the known-volumes config (creating an empty one on first run) and
    # start a file watcher for every volume registered as "truecrypt".
    # NOTE: this module is Python 2 (print statement, dict.iteritems).
    if not os.path.exists(configfile):
        write_json_file(configfile, dict(volumes = dict()))
    known = read_json_file(configfile)["volumes"]
    # keep only truecrypt-typed entries; paths are coerced to ascii bytes
    truecryptvolumes = [ x.encode('ascii','replace') for (x,y) in known.iteritems() if y == "truecrypt" ]
    for f in truecryptvolumes:
        if len(f) > 0:
            start_watching_disk_file(f, TruecryptFileHandler())
            print f
    logger.info("initialized")
def moveFileToDir(srcpath, dest):
    """Move the file at *srcpath* into directory *dest*.

    Creates *dest* when missing. Never overwrites: underscores are appended
    to the destination name until it is unique.
    """
    if not os.path.exists(dest):
        logger.info("creating dir %s" % dest)
        os.mkdir(dest)
    fname = os.path.basename(srcpath)
    logger.info("moving from %s to %s", srcpath, dest)
    destpath = os.path.join(dest, fname)
    # de-duplicate by suffixing underscores rather than clobbering
    while os.path.exists(destpath):
        destpath += "_"
    os.rename(srcpath, destpath)
    # BUG FIX: removed the leftover debug print that mixed %-placeholders
    # with str.format() — it always printed the literal template string.
def matching(self):
    """Run self.match() and score the result.

    Counts correctly judged anchor links (identical source/target, excluding
    anchors that were known in advance) against all judged links, storing
    them in self.right / self.all and logging the pair.
    """
    self.engage = self.match()
    correct = 0  # correctly judged anchor links
    judged = 0   # all judged anchor links
    for source in self.engage.keys():
        if source == self.engage[source] and source not in self.f_anchor_known_list:
            correct += 1
        judged += 1
    self.right = correct
    self.all = judged
    logger.info(str(self.right) + ',' + str(self.all))
def run(self):
    # Message-processor loop (Python 2): drain the queue of JSON lines from
    # the device, then poll every 100 ms until _stop is set.
    while True:
        while not self._messages.empty():
            line = self._messages.get()
            print 'Received line: ' + repr(line)   # debug output
            load = json.loads(line)
            print load                              # debug output
        time.sleep(.1)
        if self._stop:
            logger.info("Terminating Stadyna message processor thread.")
            break
def stopCustomMetricFlow():
    """Stop the custom-metric flow by removing all cron jobs."""
    logger.info("Stopping the Custom Metrics")
    removeCrontab = config.get('shell-commands', 'remove_all_crontab')
    #removeCrontab = "crontab -r"
    logger.info("Deleting all cron jobs")
    exit_code = os.system(removeCrontab)
    # BUG FIX: was `is not 0` — identity comparison on an int is unreliable
    # (and a SyntaxWarning on modern Python); compare values.
    if exit_code != 0:
        logger.error("Error deleting Cron jobs, health probe will not fail")
def _addDexloadPathToMCG(self, src, through, filename):
    """Record the dexload path src -> through -> filename in the MCG once."""
    logger.debug("Adding dexload method path to our graph...")
    candidate = (src, through, filename)
    already_known = candidate in self._covered_dexload
    if not already_known:
        self._covered_dexload.append(candidate)
        self._stadynaMcg.addDexloadPath(src, through, filename)
        logger.info(
            "The path [%s] -- [%s] through [%s] for dexload is added to our graph!"
            % (str(src), str(filename), str(through)))
    else:
        logger.info(
            "The path [%s] -- [%s] through [%s] for dexload is already in our graph!"
            % (str(src), str(filename), str(through)))
def enableAdbRoot(self):
    """Enable adb root on device.

    Returns:
      True when adbd is (or becomes) root, False on unrecognized output.
    """
    output = self.sendCommand("root")
    if "adbd is already running as root" in output:
        return True
    if "restarting adbd as root" in output:
        # adbd restarts: the device drops off adb briefly, wait for it
        time.sleep(2)
        self.sendCommand("wait-for-device")
        return True
    logger.info("Unrecognized output from adb root: %s" % output)
    return False
def _addNewInstancePathToMCG(self, src, through, dst):
    """Record the newInstance path src -> through -> dst in the MCG once."""
    logger.debug("Adding newInstance method path to our graph...")
    candidate = (src, through, dst)
    already_known = candidate in self._covered_newInstance
    if not already_known:
        self._covered_newInstance.append(candidate)
        self._stadynaMcg.addNewInstancePath(src, through, dst)
        logger.info(
            "The path [%s] -- [%s] through [%s] for newInstance method is added to our graph!"
            % (str(src), str(dst), str(through)))
    else:
        logger.info(
            "The path [%s] -- [%s] through [%s] for newInstance method is already in our graph!"
            % (str(src), str(dst), str(through)))
def postEvent(app, event, detail, result, data):
    """POST an event record to the LA-Config endpoint using basic auth.

    Credentials and the endpoint URL come from the 'la_config' secret; the
    event fields are passed as query parameters and the call/response are
    logged.
    """
    laconfig = getSecret('la_config')
    credentials = '%s:%s' % (laconfig['username'], laconfig['password'])
    token = base64.encodebytes(credentials.encode()).decode().replace('\n', '')
    headers = {'content-type': 'application/json', 'Authorization': 'Basic ' + token}
    url = (laconfig['postevent'] + "app=" + app + "&eventname=" + event +
           "&detail=" + detail + "&result=" + result + "&data=" + data)
    response = requests.post(url, headers=headers)
    logger.info('Log to LA-Config: {}, {}, {}, {}'.format(event, detail, result, data))
    logger.info('Log response - Status: {}, Response: {}'.format(response.status_code, response.text))
def moveFileToCorrectDirectory(targetpath, sourcefilepath, newfilename):
    """Move an episode file into its <season>/ subdirectory under *targetpath*.

    The season is the leading digit run of *newfilename*; no-op when no
    season can be determined or the file is already in place.
    """
    filename = os.path.basename(sourcefilepath)
    match = re.search("^[0-9]+", newfilename)
    if match is None:
        # FIX: logger.warn is a deprecated alias of logger.warning
        logger.warning("cannot determine season-dir for %s" % filename)
        return
    season = match.group()
    targetdir = os.path.join(targetpath, season)
    if os.path.dirname(sourcefilepath) == targetdir:
        logger.info("%s is in the correct directory" % filename)
        return
    logger.info("%s should be moved" % filename)
    if not os.path.exists(targetdir):
        os.makedirs(targetdir)
    moveAndPrintProgress(sourcefilepath, os.path.join(targetdir, newfilename))
def deleteVMFromVMSS():
    """POST a bulk instance-delete request to the VMSS endpoint for this VM."""
    logger.info("Deleting the VM from VMSS, Id: " + vmInstance.vmId)
    vm_delete_url = config.get('vmss', 'vm_delete_url')
    formatted_url = vm_delete_url.format(
        subscriptionId=vmInstance.subscriptionId,
        resourceGroupName=vmInstance.resourceGroupName,
        vmScaleSetName=vmInstance.vmScaleSetName)
    logger.info("The Delete URL is - " + formatted_url)
    # NOTE(review): the endpoint may expect the scale-set *instanceId* (the
    # numeric suffix of the VM name), not the vmId GUID — confirm.
    data = {"instanceIds": [vmInstance.vmId]}
    headers = {'Authorization': "Bearer " + vmInstance.access_token}
    # BUG FIX: was data=data, which form-encodes the dict; the ARM endpoint
    # expects a JSON body, so send it with json= (also sets Content-Type).
    requests.post(formatted_url, json=data, headers=headers)
def addNewInstancePath(self, src, through, dst):
    """Add a reflective newInstance path src -> through -> dst to the graph.

    src/through/dst are (class_name, method_name, descriptor) triples.
    The reflective call-site node (keyed by src+through) must already exist
    from static analysis; a constructor node for dst is created/fetched and
    linked (plus a link from its class node), and if dst is a
    permission-protected API an extra fake-permission node is attached.
    """
    src_class_name, src_method_name, src_descriptor = src
    dst_class_name, dst_method_name, dst_descriptor = dst
    through_class_name, through_method_name, through_descriptor = through
    # key of the newInstance node created during static analysis
    key = "%s %s %s %s %s %s %s" % (
        src_class_name, src_method_name, src_descriptor, through_class_name,
        through_method_name, through_descriptor, POSTFIX_REFL_NEWINSTANCE)
    n1 = self._get_existed_node(key)
    if n1 == None:
        logger.error(
            "Something wrong has happened! Could not find Node in Graph with key [%s]"
            % str(key))
        return
    n2 = self._get_node(NODE_CONSTRUCTOR, (dst_class_name, dst_method_name, dst_descriptor))
    n2.set_attribute(ATTR_CLASS_NAME, dst_class_name)
    n2.set_attribute(ATTR_METHOD_NAME, dst_method_name)
    n2.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
    self.G.add_edge(n1.id, n2.id)
    #we also need to add link to the class node
    #TODO: Think in the future what to do with this
    n_class = self._get_node(NODE_FAKE_CLASS, dst_class_name, None, False)
    n_class.set_attribute(ATTR_CLASS_NAME, dst_class_name)
    self.G.add_edge(n_class.id, n2.id)
    #checking if we need to add additional permission nodes
    data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
    if data in DVM_PERMISSIONS_BY_API_CALLS:
        logger.info(
            "BINGOOOOOOO! The protected method is called through reflection!"
        )
        perm = DVM_PERMISSIONS_BY_API_CALLS[data]
        # permission node is keyed by through+dst+permission name
        key1 = "%s %s %s %s %s %s %s %s" % (
            through_class_name, through_method_name, through_descriptor,
            dst_class_name, dst_method_name, dst_descriptor, POSTFIX_PERM, perm)
        n3 = self._get_node(NODE_FAKE_PERMISSION, key1, perm, False)
        n3.set_attribute(ATTR_CLASS_NAME, dst_class_name)
        n3.set_attribute(ATTR_METHOD_NAME, dst_method_name)
        n3.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
        n3.set_attribute(ATTR_PERM_NAME, perm)
        n3.set_attribute(ATTR_PERM_LEVEL, MANIFEST_PERMISSIONS[perm][0])
        self.G.add_edge(n2.id, n3.id)
def cut_k_list(self, k): logger.debug("k is %s" % (str(k))) # 产生配对的dict,设置k为3 four,tw 1,0,这里设置k four_result_list = dict() tw_result_list = dict() logger.info(len(self.f_anchor_known_list)) for line in self.f: x_list = line.split(';') if len(x_list) == 3: rate = eval(x_list[2])[0] if x_list[0] in four_result_list: four_result_list[x_list[0]][0].append(rate) four_result_list[x_list[0]][1].append( x_list[1]) # 对应的twitter名 else: four_result_list[x_list[0]] = [[rate], [x_list[1]]] if x_list[1] in tw_result_list: tw_result_list[x_list[1]][0].append(rate) tw_result_list[x_list[1]][1].append( x_list[0]) # 对应的twitter名 else: tw_result_list[x_list[1]] = [[rate], [x_list[0]]] for item in four_result_list.keys(): s = dict() for i in range(0, len(four_result_list[item][1])): s[four_result_list[item][1][i]] = four_result_list[item][0][i] s = sorted(s.items(), key=lambda j: j[1], reverse=True) s = map(lambda x: x[0], s) four_result_list[item] = list(s) if k is not None: if len(four_result_list[item]) > k - 1: four_result_list[item] = four_result_list[item][0:k] for item in tw_result_list.keys(): s = dict() for i in range(0, len(tw_result_list[item][1])): s[tw_result_list[item][1][i]] = tw_result_list[item][0][i] s = sorted(s.items(), key=lambda j: j[1], reverse=True) s = map(lambda x: x[0], s) tw_result_list[item] = list(s) if k is not None: if len(tw_result_list[item]) > k - 1: tw_result_list[item] = tw_result_list[item][0:k] self.four_result = four_result_list self.tw_result = tw_result_list
def run(self):
    # Reader-thread loop: stream `adb logcat` from the device, parse each
    # line for Seccon messages, and enqueue parsed results until _stop is
    # set (then terminate the child process).
    if not self._device.is_alive():
        return
    self._device.clean_logcat()
    process = self._device.get_logcat()
    msg = None   # unused; kept as-is
    res = False
    for line in iter(process.stdout.readline, ''):
        res = self.parseSecconMsg(line)
        if res != None:
            self._messages.put(res)
        if self._stop:
            logger.info("Terminating thread that reads 'adb logcat' output.")
            process.terminate() #stops the child process
            break
    process.stdout.close()
def writebody():
    # Health-check page body. Flips the module-level instance_status to 410
    # (Gone) once the instance is marked pending-delete, mirrors it on the
    # web framework's response object, and reports the state in HTML.
    global instance_status
    metadata = InstanceMetadata().populate()
    logger.info(metadata)
    # only transition once, from healthy (200) to gone (410)
    if instance_status == 200 and metadata.isPendingDelete():
        instance_status = 410 # HTTP Status Code 410 (Gone)
    response.status = instance_status
    status = 'Healthy'
    if instance_status == 410:
        status = 'Gone'
    body = '<html><head><title>VM Health Check</title></head>'
    body += '<body><h2> ' + metadata.name + ' is ' + status + ' </h2><ul><h3></body></html>'
    return body
def run(self):
    # Reader-thread loop: stream `adb logcat` from the device, parse each
    # line for Seccon messages, and enqueue parsed results until _stop is
    # set (then terminate the child process).
    if not self._device.is_alive():
        return
    self._device.clean_logcat()
    process = self._device.get_logcat()
    msg = None   # unused; kept as-is
    res = False
    for line in iter(process.stdout.readline, ''):
        res = self.parseSecconMsg(line)
        if res != None:
            self._messages.put(res)
        if self._stop:
            logger.info(
                "Terminating thread that reads 'adb logcat' output.")
            process.terminate() #stops the child process
            break
    process.stdout.close()
def pull(self, src, dest):
    """Pulls the file src on the device onto dest on the host.

    Args:
      src: absolute file path of file on device to pull
      dest: destination file path on host

    Returns:
      True if success and False otherwise.
    """
    # make sure the destination directory exists before pulling
    dest_dir = os.path.dirname(dest)
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    if not self.doesFileExist(src):
        logger.info("ADB pull Failed: Source file %s does not exist." % src)
        return False
    self.sendCommand("pull %s %s" % (src, dest), timeout_time=60)
    return True
def addInvokePath(self, src, through, dst):
    """Add a reflective invoke path src -> through -> dst to the graph.

    src/through/dst are (class_name, method_name, descriptor) triples.
    The reflective call-site node (keyed by src+through) must already exist
    from static analysis; a method node for dst is created/fetched and
    linked, and if dst is a permission-protected API an extra
    fake-permission node is attached to it.
    """
    src_class_name, src_method_name, src_descriptor = src
    dst_class_name, dst_method_name, dst_descriptor = dst
    through_class_name, through_method_name, through_descriptor = through
    # key of the invoke node created during static analysis
    key = "%s %s %s %s %s %s %s" % (
        src_class_name, src_method_name, src_descriptor, through_class_name,
        through_method_name, through_descriptor, POSTFIX_REFL_INVOKE)
    n1 = self._get_existed_node(key)
    if n1 == None:
        logger.warning(
            "Something wrong has happened! Could not find invoke Node in Graph with key [%s]"
            % str(key))
        return
    n2 = self._get_node(NODE_METHOD, (dst_class_name, dst_method_name, dst_descriptor))
    n2.set_attribute(ATTR_CLASS_NAME, dst_class_name)
    n2.set_attribute(ATTR_METHOD_NAME, dst_method_name)
    n2.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
    self.G.add_edge(n1.id, n2.id)
    #check if called method calls protected feature
    data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
    if data in DVM_PERMISSIONS_BY_API_CALLS:
        logger.info(
            "BINGOOOOOOO! The protected method is called through reflection!"
        )
        perm = DVM_PERMISSIONS_BY_API_CALLS[data]
        # permission node is keyed by through+dst+permission name
        key1 = "%s %s %s %s %s %s %s %s" % (
            through_class_name, through_method_name, through_descriptor,
            dst_class_name, dst_method_name, dst_descriptor, POSTFIX_PERM, perm)
        n3 = self._get_node(NODE_FAKE_PERMISSION, key1, perm, False)
        n3.set_attribute(ATTR_CLASS_NAME, dst_class_name)
        n3.set_attribute(ATTR_METHOD_NAME, dst_method_name)
        n3.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
        n3.set_attribute(ATTR_PERM_NAME, perm)
        n3.set_attribute(ATTR_PERM_LEVEL, MANIFEST_PERMISSIONS[perm][0])
        self.G.add_edge(n2.id, n3.id)
def delete_vmss_instance():
    ##MSI based authentication
    # Delete this VM's scale-set instance via the Azure SDK, using the
    # managed identity for credentials and IMDS metadata for identifiers.
    logger.info("Entry Point of delete_vmss_instance")
    credentials = MSIAuthentication()
    metadata = InstanceMetadata().populate()
    subscription_id = metadata.subscriptionId
    #resource_client = ResourceManagementClient(credentials, subscription_id)
    compute_client = ComputeManagementClient(credentials, subscription_id)
    #network_client = NetworkManagementClient(credentials, subscription_id)
    resourceGroupName = metadata.resourceGroupName
    vmScaleSetName = metadata.vmScaleSetName
    vmname = metadata.name
    logger.info("metadata.name : " + vmname)
    # VMSS VM names look like "<scaleset>_<instanceId>"; the numeric suffix
    # is the instance id the SDK delete call expects.
    vm_id = vmname.split("_")
    convertedInt_vm_id = int(vm_id[1])
    #host_name = socket.gethostname()
    #vmid = hostname_to_vmid(host_name)
    compute_client.virtual_machine_scale_set_vms.delete(resourceGroupName, vmScaleSetName, convertedInt_vm_id)
def post_metrics():
    """Post the current CPU metric to the Azure custom-metrics endpoint.

    Reads the global cpu_percent, formats the monitor URL from config and
    instance metadata, and POSTs the metric payload with a bearer token.
    """
    global cpu_percent
    logger.info("Posting Custom metrics.....")
    metric_post_url = config.get('monitor', 'metric_post_url')
    formatted_url = metric_post_url.format(
        location=vmInstance.location,
        subscriptionId=vmInstance.subscriptionId,
        resourceGroupName=vmInstance.resourceGroupName,
        resourceName=vmInstance.name)
    data = getMetricPostData(cpu_percent)
    logger.info("Data: " + json.dumps(data))
    headers = json.loads(config.get('monitor', 'metric_headers'))
    headers['Authorization'] = "Bearer " + vmInstance.access_token
    # BUG FIX: the original set Content-Length to str(len(data)) — len() of a
    # dict is its *key count*, not the body size, corrupting the header.
    # requests computes the correct Content-Length for json= payloads.
    logger.info("headers: " + json.dumps(headers))
    requests.post(formatted_url, json=data, headers=headers)
def _process_ubc_get_imsi(self, message, address):
    """Process a get_imsi request packet from a 4G (UBC) client.

    message: dict containing the IMSI; address: (host, port) tuple of the
    sender. Replies with a set_imsi response that either rejects the IMSI
    or redirects ("assigns") it to a 2G translation module.
    """
    response = self._response
    self.report_imsi_num_4G += 1
    logger.debug('received get_imsi_ubc packet, it is ready to send set_imsi_ubc packet')
    imsi = message.get(MsgKey.imsi)
    table_name, store_code = Tool.parse_imsi(imsi, mnc_2_tablename)
    # If the matching carrier's pool of free 4G translation modules is
    # empty, reject immediately.
    # NOTE(review): the mobile side tests `<= 0` but the union side `== 0`
    # — confirm whether the asymmetry is intended.
    if table_name == mobile_table_name and self.mobile_4G_empty_ue_num <= 0 or \
            table_name == union_table_name and self.union_4G_empty_ue_num == 0:
        response(address, id=RspMsgIdValue.ubc_set_imsi, ubc_info=[[imsi, Ubc.REJECT,0]])
        logger.info("No free translation modules are available")
        return
    # Modules are available: consult the database for this IMSI.
    try:
        row = self._query_database(table_name, store_code)
        if row:
            # IMSI already translated before -> reject the 4G device
            logger.debug(imsi + '号码翻译过')  # "number already translated"
            response(address, id=RspMsgIdValue.ubc_set_imsi, ubc_info=[[imsi, Ubc.REJECT, 0]])
        else:
            # IMSI not translated yet -> redirect/assign it to 2G
            logger.debug(imsi + '号码没翻译过')  # "number not yet translated"
            response(address, id=RspMsgIdValue.ubc_set_imsi, ubc_info=[[imsi, Ubc.REDIRECT, arfcn.get(table_name)]])
            self.database.insert(table_name, store_code)
            # forward the assigned IMSI to the 2G controller
            response(self.address_2GC, id=RspMsgIdValue.ntc_set_imsi, imsilist=[imsi], modulelist=[table_name])
            self._ntc_update_empty_ue_num(table_name)
    except Exception as e:
        # e.message is Python 2 style; kept as-is
        logger.error(str(e.args) + str(e.message))
def addNewInstancePath(self, src, through, dst):
    """Add a reflective newInstance path src -> through -> dst to the graph.

    src/through/dst are (class_name, method_name, descriptor) triples.
    The reflective call-site node (keyed by src+through) must already exist
    from static analysis; a constructor node for dst is created/fetched and
    linked (plus a link from its class node), and if dst is a
    permission-protected API an extra fake-permission node is attached.
    """
    src_class_name, src_method_name, src_descriptor = src
    dst_class_name, dst_method_name, dst_descriptor = dst
    through_class_name, through_method_name, through_descriptor = through
    # key of the newInstance node created during static analysis
    key = "%s %s %s %s %s %s %s" % (src_class_name, src_method_name, src_descriptor, through_class_name, through_method_name, through_descriptor, POSTFIX_REFL_NEWINSTANCE)
    n1 = self._get_existed_node(key)
    if n1 == None:
        logger.error("Something wrong has happened! Could not find Node in Graph with key [%s]" % str(key))
        return
    n2 = self._get_node(NODE_CONSTRUCTOR, (dst_class_name, dst_method_name, dst_descriptor))
    n2.set_attribute(ATTR_CLASS_NAME, dst_class_name)
    n2.set_attribute(ATTR_METHOD_NAME, dst_method_name)
    n2.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
    self.G.add_edge(n1.id, n2.id)
    #we also need to add link to the class node
    #TODO: Think in the future what to do with this
    n_class = self._get_node(NODE_FAKE_CLASS, dst_class_name, None, False)
    n_class.set_attribute(ATTR_CLASS_NAME, dst_class_name)
    self.G.add_edge(n_class.id, n2.id)
    #checking if we need to add additional permission nodes
    data = "%s-%s-%s" % (dst_class_name, dst_method_name, dst_descriptor)
    if data in DVM_PERMISSIONS_BY_API_CALLS:
        logger.info("BINGOOOOOOO! The protected method is called through reflection!")
        perm = DVM_PERMISSIONS_BY_API_CALLS[ data ]
        # permission node is keyed by through+dst+permission name
        key1 = "%s %s %s %s %s %s %s %s" % (through_class_name, through_method_name, through_descriptor, dst_class_name, dst_method_name, dst_descriptor, POSTFIX_PERM, perm)
        n3 = self._get_node(NODE_FAKE_PERMISSION, key1, perm, False)
        n3.set_attribute(ATTR_CLASS_NAME, dst_class_name)
        n3.set_attribute(ATTR_METHOD_NAME, dst_method_name)
        n3.set_attribute(ATTR_DESCRIPTOR, dst_descriptor)
        n3.set_attribute(ATTR_PERM_NAME, perm)
        n3.set_attribute(ATTR_PERM_LEVEL, MANIFEST_PERMISSIONS[ perm ][0])
        self.G.add_edge(n2.id, n3.id)
def set_follow(self, random_num):
    """Load follow relations into the twitter/foursquare graphs and pick anchors.

    Reads the two hard-coded follow files, weights edges (2 = reciprocal,
    1 = one-way), mirrors anchor edges between the two graphs, prunes
    one-way twitter edges and isolated nodes, then marks a fixed sample of
    anchor users as "known in advance".

    NOTE(review): `random_num` is only used by the commented-out random
    sampling; the hard-coded index list below is used instead.
    """
    logger.debug("save follow")
    # --- twitter following file: user1 <tab> user2 ---
    tw_f = open('E:/graduate/twitter_foursquare/twitter/following')
    for line in tw_f:
        user1 = line.split('\t')[0]
        user2 = line.split('\t')[1].split('\n')[0]
        if self.tw_G.has_node(user1) and self.tw_G.has_node(user2):
            if self.tw_G.has_edge(user2, user1):
                # reverse edge exists -> reciprocal relation, weight 2
                self.tw_G.edges[user2, user1]["weight"] = 2
                if user1 in self.t_anchor_known_list and user2 in self.f_anchor_known_list:
                    # mirror the anchor edge into the foursquare graph
                    f_user1 = self.tw_G.node[user1]["fourID"]
                    f_user2 = self.tw_G.node[user2]["fourID"]
                    self.four_G.add_edge(f_user1, f_user2, weight=2)
            else:
                self.tw_G.add_edge(user1, user2, weight=1)
                if user1 in self.t_anchor_known_list and user2 in self.f_anchor_known_list:
                    f_user1 = self.tw_G.node[user1]["fourID"]
                    f_user2 = self.tw_G.node[user2]["fourID"]
                    self.four_G.add_edge(f_user1, f_user2, weight=1)
    tw_f.close()
    # --- foursquare following file ---
    f = open(
        'E:/graduate/twitter_foursquare/foursquare/users/user_following')
    for line in f:
        user1 = line.split('\t')[0]
        user2 = line.split('\t')[1].split('\n')[0]
        if self.four_G.has_node(user1) and self.four_G.has_node(user2):
            if self.four_G.has_edge(user2, user1):
                self.four_G.edges[user2, user1]["weight"] = 2
                if user1 in self.f_anchor_known_list and user2 in self.f_anchor_known_list:
                    # mirror the anchor edge into the twitter graph
                    t_user1 = self.four_G.node[user1]["twitterID"]
                    t_user2 = self.four_G.node[user2]["twitterID"]
                    if self.tw_G.has_edge(t_user1, t_user2):
                        self.tw_G.edges[t_user2, t_user1]["weight"] = 2
            else:
                self.four_G.add_edge(user1, user2, weight=1)
                if user1 in self.f_anchor_known_list and user2 in self.f_anchor_known_list:
                    t_user1 = self.four_G.node[user1]["twitterID"]
                    t_user2 = self.four_G.node[user2]["twitterID"]
                    self.tw_G.add_edge(t_user1, t_user2, weight=1)
    # remove one-way (weight 1) relations from the twitter graph
    # (was: 删除twitter中的单向关系)
    self.tw_G.remove_edges_from([
        e for e in self.tw_G.edges() if self.tw_G.edges[e]["weight"] == 1
    ])
    # remove isolated (degree < 1) nodes, keeping the anchor lists in sync
    # (was: 删除边缘节点)
    vdict = self.four_G.degree()
    r_node = [v for v in self.four_G if vdict[v] < 1]
    for v in r_node:
        if v in self.f_all_anchor_list:
            self.f_all_anchor_list.remove(v)
            self.t_all_anchor_list.remove(self.four_G.node[v]['twitterID'])
    self.four_G.remove_nodes_from(r_node)
    vdict = self.tw_G.degree()
    r_node = [v for v in self.tw_G if vdict[v] < 1]
    for v in r_node:
        if v in self.t_all_anchor_list:
            self.t_all_anchor_list.remove(v)
            self.f_all_anchor_list.remove(self.tw_G.node[v]['fourID'])
    self.tw_G.remove_nodes_from(r_node)
    # fixed sample of "known in advance" anchor users
    # (was: 生成事先知道的锚用户)
    self.random_list = [
        168, 350, 1286, 120, 803, 1093, 736, 130, 1187, 1210, 50, 579, 604,
        1135, 1278, 1326, 330, 875, 1390, 95, 173, 698, 1067, 665, 1457, 458,
        1281, 1197, 789, 147, 641, 1085, 1239, 13, 512, 343, 808, 539, 1402,
        575, 829, 88, 1219, 346, 1221, 911, 1103, 1002, 426, 1414, 772, 373,
        104, 162, 666, 809, 307, 1391, 887, 1463, 1060, 448, 1238, 1438, 1164,
        419, 85, 949, 756, 7, 896, 954, 1481, 1399, 1468, 1251, 847, 398, 286,
        1044, 595, 1176, 622, 1009, 1173, 530, 1094, 107, 1188, 1101, 1070,
        793, 450, 1016, 746, 706, 1134, 719, 625, 909, 974, 1317, 890, 1019,
        823, 962, 639, 587, 918, 1181, 294, 748, 425, 280, 1150, 1376, 226,
        475, 661, 877, 599, 714, 917, 786, 124, 101, 160, 1350, 237, 205, 317,
        920, 204, 1227, 105, 1448, 77, 649, 1245, 873, 15, 1088, 966, 672,
        366, 1436, 1405, 1112, 175, 251, 108, 969, 1100, 674, 906, 1379, 1035,
        891, 1108, 57, 1306, 263, 364, 1119, 1299, 463, 1032, 729, 281, 971,
        610, 732, 731, 460, 1471, 945, 761, 525, 78, 150, 468, 1358, 38, 245,
        1147, 1024, 788, 1124, 157, 831, 1470, 720, 2, 952, 1480, 279, 109,
        1183, 424, 1268, 523, 276, 1036, 28, 930, 380, 540, 1454, 934, 1437,
        912, 106, 1460, 697, 899, 801, 296, 686, 1171, 305, 895, 946, 103,
        1000, 704, 807, 191, 755, 1250, 1406, 321, 987, 943, 1450, 1469, 1143,
        1395, 1479, 135, 1021, 769, 1247, 1426, 614, 1374, 1318, 148, 1081,
        678, 922, 423, 898, 629, 96, 230, 859, 222, 27, 312, 1010, 481, 269,
        1079, 991, 1389, 832, 358, 90, 394, 257, 411, 667, 1475, 495, 500,
        692, 711, 1242, 445, 754, 329, 1284, 180, 1220, 693, 618, 747, 1089,
        1215, 1413, 229, 781, 1082, 368, 75, 267, 241, 370, 270, 824
    ]
    # self.random_list = random.sample(range(0, len(self.f_all_anchor_list)), random_num)
    # (translated: randomly pick `random_num` anchor users known in advance)
    for i in self.random_list:
        self.f_anchor_known_list.append(self.f_all_anchor_list[i])
        self.four_G.node[self.f_all_anchor_list[i]]["in_test"] = 1
        self.t_anchor_known_list.append(self.t_all_anchor_list[i])
    logger.info(self.random_list)
    logger.info(nx.info(self.tw_G))
    logger.info(nx.info(self.four_G))
    print("length of already known anchor list:",
          len(self.f_anchor_known_list), len(self.t_anchor_known_list))
    print("length of all anchor list:", len(self.f_all_anchor_list),
          len(self.t_all_anchor_list))
def _addSuspiciousNewInstance(self, throughMethod, dst, stack):
    """Record a newInstance call whose destination was not found among the
    statically uncovered methods, so it can be reviewed later."""
    suspicious_entry = (throughMethod, dst, stack)
    logger.info("The destination for newInstance method [%s] was not found in the list of uncovered methods! Adding it to the list of suspicious methods!" % str(dst))
    self._suspicious_newInstance.append(suspicious_entry)
except KeyError: logger.error('Error returning required fields') continue # Run only for interactive site or all sites as part of scheduled process if run_mode == 'interactive' and inst_shortname == interactive_site or run_mode == 'testing' and inst_shortname == interactive_site: # select inst/input credentials path = ladata_dir + '/activity/delete-request/' file = 'deletions.tsv' # create new log file for each site to track errors if not not os.path.exists(path + 'logs/'): os.makedirs(path + 'logs/') logger.info('Starting ' + app + ': ' + inst_shortname) postEvent(app, 'starting-inst', inst_shortname, "", '') if os.path.isfile(path + file): try: deletions = pd.read_csv(path + file, sep='\t') except IOError: logger.error('File not found, ensure that the file is correctly named and in the correct directory') base64string = base64.encodebytes(('%s:%s' % (xapi_username, xapi_password)).encode()).decode().replace( '\n', '') xapi_headers = {'content-type': 'application/json', 'Authorization': 'Basic ' + base64string} xapi_batch_delete(deletions, xapi_headers) # log completion status
def _addSuspiciousDexload(self, filePath, stack):
    """Remember a dex load whose triggering call could not be located in the
    stack; the file itself is still kept for analysis."""
    suspicious_entry = (filePath, stack)
    logger.info("No dexload method is found in the stack! Leaving the file [%s] for analysis! Adding it to suspicious dexload files!" % str(filePath))
    self._suspicious_dexload.append(suspicious_entry)
def on_delete(self, path):
    """Dismount the TrueCrypt volume mounted at *path* and log the action."""
    # 'truecrypt -d <path>' dismounts the volume; the exit code is not
    # checked, keeping the original best-effort behaviour.
    cmd = ["truecrypt", "-d", path]
    subprocess.call(cmd)
    logger.info("deleted path %s " % path)
def performCustomOperation():
    """Placeholder hook for site-specific tasks; currently only logs."""
    logger.info("Performing custom operation")
#!/usr/bin/python3
"""VM lifecycle helpers driven by instance metadata tags."""
from logconfig import logger
from configuration import config
# from vminstance import VMInstance
from InstanceMetadata import InstanceMetadata
from bearer_token import BearerAuth
import requests
import json
import os

# Populated once at import time; the helpers below read this snapshot.
vmInstance = InstanceMetadata().populate()
logger.info(vmInstance)


"""
Check if the vm needs to be deleted
(Merged into InstanceMedata.py)
"""
def isInstanceinPendingDelete():
    """Return True when the configured pending-delete tag is on this VM."""
    # Tag name comes from the [imds] section of the configuration.
    deleteTag = config.get('imds', 'pending_delete_tag')
    # The membership test already yields the boolean; the original
    # if/else returning True/False was redundant.
    return deleteTag in vmInstance.tags


"""
Here is where we need to put all the custom tasks that need to be performed
"""
def performCustomOperation():
    logger.info("Performing custom operation")
    ## This is where the custom logic will go
def perform_analysis(inputApkPath, resultsDirPath, sourceFilesDirPath):
    """Run the full STADYNA analysis pipeline on one APK.

    Steps: copy the APK into the working dir, do static analysis, and if
    suspicious methods were found, install the app on a device and consume
    runtime messages until interrupted, then save the final results.

    :param inputApkPath: path to the APK under analysis
    :param resultsDirPath: directory where GEXF/info results are written
    :param sourceFilesDirPath: working directory for the copied APK and
        any dynamically loaded code files
    """
    logger.debug("Starting analysis of the application [%s]..." % inputApkPath)
    startTime = time.time()
    # Work on a copy so the original APK is never touched.
    if not copyFileToDir(inputApkPath, sourceFilesDirPath):
        logger.error("Could not copy source file to directory! The analysis was not performed!")
        return
    apkFileNameExt = os.path.basename(inputApkPath)
    apkFileName, _ = os.path.splitext(apkFileNameExt)
    apkFilePath = os.path.join(sourceFilesDirPath, apkFileNameExt)
    # Static phase: build the initial method call graph.
    stadynaAnalyser = StadynaAnalyser()
    stadynaAnalyser.makeInitialAnalysis(apkFilePath)
    initial_name = apkFileName + "_initial"
    stadynaAnalyser.saveGexf(resultsDirPath, initial_name)
    # No reflection/dexload candidates found statically: nothing to observe
    # dynamically, save results and stop.
    if not stadynaAnalyser.containMethodsToAnalyse():
        logger.info("Input apk file does not contain suspicious methods!")
        stadynaAnalyser.performInfoSave(resultsDirPath, apkFileName)
        logger.info("The analysis is finished!")
        return
    stadynaAnalyser.printMoiLists()
    # Dynamic phase requires a live device/emulator.
    dev = getDeviceForDynAnalysis()
    if not dev.is_alive():
        logger.warning("The selected device to perform dynamic analysis is not alive! Finishing!")
        stadynaAnalyser.performInfoSave(resultsDirPath, apkFileName)
        logger.info("The analysis is finished!")
        return
    #TODO: Check if it is possible racing conditions here
    #If we at first install application and then run Message analyser
    #everything is fine.
    #If we at first start Message analyser and then start the application
    #the first dex load is actual load when the application is installed.
    androApk = apk.APK(inputApkPath)
    installed = dev.install_package(inputApkPath)
    if not installed:
        logger.error("An error occurred during the installation of the app [%s]! Cannot perform an analysis!" % inputApkPath)
        return
    package = androApk.get_package()
    mainActivity = androApk.get_main_activity()
    # The uid is used below to filter runtime messages to this app only.
    uid = dev.get_package_uid(package)
    if uid == -1:
        logger.error("Cannot get the uid of the package [%s]! Cannot start an analysis!"
                     % package)
        return
    #TODO: test section
    # Producer thread pushes device log messages into this queue.
    messages = Queue.Queue()
    seccon_producer = SecconMessageProducer(dev, messages)
    seccon_producer.setDaemon(False)
    seccon_producer.start()
    #sleeping 3sec before starting new activity
    time.sleep(3)
    #Add here the invocation of the main activity
    startMainActivity(dev, package, mainActivity)
    # Consume messages until the user interrupts with Ctrl-C.
    while 1:
        while not messages.empty():
            line = messages.get()
            #print 'Received line: ' + repr(line)
            decodedLine = json.loads(line)
            # Skip messages that belong to other apps on the device.
            if int(decodedLine.get(consts.JSON_UID)) != uid:
                continue
            analyseStadynaMsg(dev, sourceFilesDirPath, stadynaAnalyser, decodedLine)
        stadynaAnalyser.printMoiLists()
        try:
            time.sleep(2)
            #The same method can be used for different calls. Thus, we comment this line now ) #checkExit(stadynaAnalyser)
        except KeyboardInterrupt:
            logger.debug("Exiting...")
            break
    seccon_producer.stopThread()
    seccon_producer.join()
    endTime = time.time()
    stadynaAnalyser.performFinalInfoSave(resultsDirPath, apkFileName, (endTime-startTime))
    logger.info("The analysis is finished!")
def stop_watching_disk_file(filename):
    """Stop the watcher registered for *filename* and drop its registry entry."""
    logger.info("stop watching %s" % filename)
    watcher = notifiers[filename]
    watcher.stop()
    del notifiers[filename]
def store_password(self, volume, password):
    """Persist *password* for *volume* in the JSON password file at self.path.

    NOTE(security): the password is still stored as plain text on disk (the
    original design), but the secret itself is no longer written to the log —
    logging credentials leaks them into log files and aggregators.
    """
    # Log the event, never the password value.
    logger.info("storing password for volume %s as plain text" % volume)
    passwords = read_json_file(self.path)
    logger.debug("just read %s" % self.path)
    passwords[volume] = password
    write_json_file(self.path, passwords)
def xapi_batch_delete(deletions, xapi_headers):
    """Send one xAPI batch-delete request per student row in *deletions*.

    :param deletions: DataFrame with columns STUDENT_ID, VLE_ID, SHIB_ID,
        START_DATE, END_DATE (one row per deletion request)
    :param xapi_headers: HTTP headers including Basic auth for the LDH API
    """
    count = 0
    # set up batch delete queries — template mutated per row below
    body = [
        {
            "filter": {
                "statement.actor.account.name": {
                    "$in": [""]
                }
            },
            "timestamp": {
                "$gt": {
                    "$dte": "2019-11-04T00:00:00"
                },
                "$lt": {
                    "$dte": "2019-11-05T00:00:00"
                }
            }
        }
    ]
    # generate query strings
    for row, student in deletions.iterrows():
        student_id = student['STUDENT_ID']
        vle_id = student['VLE_ID']
        shib_id = student['SHIB_ID']
        student_ids = [student_id, vle_id, shib_id]
        # Include upper/lower-case variants so the filter matches regardless
        # of how the account name was cased when recorded.
        students_upper = [x.upper() for x in student_ids]
        students_lower = [x.lower() for x in student_ids]
        student_ids = student_ids + students_upper + students_lower
        # parse dates for json format
        start_date = student['START_DATE']
        start_date = dateutil.parser.parse(start_date)
        start_date = datetime.strftime(start_date, '%Y-%m-%dT%H:%M:%S')
        end_date = student['END_DATE']
        end_date = dateutil.parser.parse(end_date)
        end_date = datetime.strftime(end_date, '%Y-%m-%dT%H:%M:%S')
        # update query with students and time periods
        body[0]["filter"]["statement.actor.account.name"]["$in"] = student_ids
        body[0]['timestamp']['$gt']['$dte'] = start_date
        body[0]['timestamp']['$lt']['$dte'] = end_date
        # Strip the surrounding list brackets so a single JSON object (not a
        # one-element array) is posted to the API.
        str_query = json.dumps(body)
        str_query = str_query[1:-1]
        str_query = json.loads(str_query)
        url = 'https://jisc.learninglocker.net/api/v2/batchdelete/initialise'
        logger.info('Sending batch request: ' + url + str(str_query))
        response = requests.post(url, headers=xapi_headers, json=str_query)
        if response.ok:
            logger.info(str(response) + ': Successfully sent batch delete request')
            count += 1
        else:
            logger.error(str(response) + ': Post statement to LDH failed')
    if count > 0:
        # verify requests — fetch the most recent `count` pending deletions
        logger.info('Showing ' + str(count) + ' successful statements sent to LDH')
        request_verify = 'https://jisc.learninglocker.net/api/connection/batchdelete?filter={"done": false}&sort={"createdAt":-1,"_id": 1}&first=**count**'
        request_verify = request_verify.replace('**count**', str(count))
        response = requests.get(request_verify, headers=xapi_headers)
        if response.ok:
            message = json.dumps(response.text)
            logger.info(message)
            # show the user the ids and filters of the requests sent
    else:
        logger.info('No requests sent to LDH, check process.')
def _addSuspiciousInvoke(self, throughMethod, dst, stack):
    """Store an invoke whose destination is absent from the uncovered-method
    list, for later inspection."""
    #TODO: Write logic later to filter out unsuspicious methods
    suspicious_entry = (throughMethod, dst, stack)
    logger.info("The destination for invoke method [%s] was not found in the list of uncovered methods! Adding it to the list of suspicious methods!" % str(dst))
    self._suspicious_invoke.append(suspicious_entry)