def capture_series(resX, resY, num_photos, job_dir):
    """Capture `num_photos` sequentially numbered photos into `job_dir`.

    Args:
        resX, resY: capture resolution passed straight through to capture.capture.
        num_photos: how many photos to take (indices 0 .. num_photos-1).
        job_dir: destination directory for the capture module.
    """
    # range() replaces the manual while-counter; the dangling bare `return`
    # at the end of the original was redundant and has been dropped.
    for i in range(num_photos):
        print("Capturing photo " + str(i) + " into dir " + job_dir)
        capture.capture(resX, resY, i, job_dir)
def UpdateJson(name, content):
    """Replace the content of an already-registered json, in memory and on disk.

    Returns a human-readable status string ("Successful!" or a failure message).
    """
    # Only existing entries may be updated; unknown names are rejected.
    if name not in capture().re_dict:
        return "Failed! Do not have this json!"
    capture().re_dict[name] = content
    # Persist the new content next to the other config jsons.
    json_path = os.sep.join([capture().re_config_dir, name + '.json'])
    with open(json_path, 'w') as handle:
        json.dump(content, handle)
    return "Successful!"
def DeletJson(name):
    """Delete a registered json from memory and from disk.

    (Function name kept as-is — despite the typo — for caller compatibility.)
    Returns a human-readable status string.
    """
    # del in memory
    if name not in capture().re_dict:
        return "Failed! Do not have this json!"
    del capture().re_dict[name]
    # del in local: reuse the computed path instead of rebuilding the same
    # expression a second time (the original duplicated the os.sep.join call).
    file_path = os.sep.join([capture().re_config_dir, name + '.json'])
    os.remove(file_path)
    return "Successful!"
def test_parse_active_view():
    """Options parsed from the active view can be fed back into capture()."""
    # Set focus to modelPanel1 (assume it exists)
    # Otherwise the panel with focus (temporary panel from capture)
    # got deleted and there's no "active panel"
    import maya.cmds as cmds
    cmds.setFocus("modelPanel1")
    parsed = capture.parse_active_view()
    capture.capture(**parsed)
def AddJson(name, content):
    """Register a new json under `name`, in memory and on disk.

    Fails if an entry with that name already exists; use update/delete first.
    Returns a human-readable status string.
    """
    # update the memory — duplicates are rejected outright.
    if name in capture().re_dict:
        return "Add Json Failed! Already have this json,you can get and update that file, or delet it first and try again! "
    capture().re_dict[name] = content
    # update local: write the json file alongside the other configs.
    json_path = os.sep.join([capture().re_config_dir, name + '.json'])
    with open(json_path, 'w') as handle:
        json.dump(content, handle)
    return "Successful!"
def stop_motion():
    """Kill the `motion` daemon and block until the camera is usable again.

    Retries a test capture every 0.5s until the camera resource is released.
    """
    # Py2-only `print x` statements replaced with the function form, which
    # behaves identically for a single argument on both Python 2 and 3.
    print("stop motion...")
    p = subprocess.Popen(['sudo', 'pkill', 'motion'])
    p.wait()
    # make sure that the resource has been released
    while True:
        try:
            capture(conf['image_path'])
            break
        except picamera.PiCameraMMALError as e:
            print(e)
            time.sleep(0.5)
def _main():
    """CLI entry point: parse arguments and dispatch to the chosen command."""
    args = _parse_arguments()
    global _debug
    _debug = args.debug
    try:
        command = args.command
        # Guard-style dispatch; unknown commands fall through silently,
        # matching the original chain.
        if command in ('capture', 'cap'):
            capture(args)
        elif command == 'clean':
            clean(args)
        elif command in ('convert', 'con'):
            convert(args)
    except KeyboardInterrupt:
        # TODO: Maybe track some statistics and print them on exit.
        print()
        sys.exit(0)
def test_print_for():
    """P.Print inside a `for` loop emits one line per iteration with correct values."""
    class Print_For(Cell):
        def __init__(self):
            super().__init__()
            self.print = P.Print()

        def construct(self, x, y):
            y = x + y
            self.print("input_x before:", x, "input_y before:", y)
            for _ in range(3):
                y = y + 1
                self.print("input_x after:", x, "input_y after:", y)
            return y

    cap = Capture()
    with capture(cap):
        input_x = Tensor(2, dtype=ms.int32)
        input_y = Tensor(4, dtype=ms.int32)
        expect = Tensor(9, dtype=ms.int32)
        net = Print_For()
        out = net(input_x, input_y)
        # give the async Print op time to flush before checking output
        time.sleep(0.1)
        np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())

    # Exact text blocks expected in the captured stdout.
    patterns = {
        'input_x before:\nTensor(shape=[], dtype=Int32, value=2)\n'
        'input_y before:\nTensor(shape=[], dtype=Int32, value=6)',
        'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
        'input_y after:\nTensor(shape=[], dtype=Int32, value=7)',
        'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
        'input_y after:\nTensor(shape=[], dtype=Int32, value=8)',
        'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
        'input_y after:\nTensor(shape=[], dtype=Int32, value=9)'
    }
    check_output(cap.output, patterns)
def test_print_add():
    """P.Print after P.Add prints the summed value and the untouched operand."""
    class Print_Add(Cell):
        def __init__(self):
            super().__init__()
            self.print = P.Print()
            self.add = P.Add()

        def construct(self, x, y):
            x = self.add(x, y)
            self.print("input_x:", x, "input_y:", y)
            return x

    cap = Capture()
    with capture(cap):
        input_x = Tensor(3, dtype=ms.int32)
        input_y = Tensor(4, dtype=ms.int32)
        expect = Tensor(7, dtype=ms.int32)
        net = Print_Add()
        out = net(input_x, input_y)
        # give the async Print op time to flush before checking output
        time.sleep(0.1)
        np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())

    patterns = {
        'input_x:\nTensor(shape=[], dtype=Int32, value=7)\n'
        'input_y:\nTensor(shape=[], dtype=Int32, value=4)'
    }
    check_output(cap.output, patterns)
def calibrate(self):
    """Measure the camera response to an all-off and an all-on projector frame.

    Stores per-pixel dark/bright baselines in self.minimums/self.maximums and
    derives the projection rectangle from their difference.
    """
    # Project black, let the projector settle, then sample the dark baseline.
    off = numpy.zeros((self.H, self.W, 3), dtype=numpy.uint8)
    cv2.imshow("projector", off)
    cv2.waitKey(100)
    # mean(axis=2) collapses RGB to grayscale; blur kernel (1,1) is a no-op
    # placeholder — presumably tuned at some point. TODO confirm.
    self.minimums = cv2.blur(numpy.asarray(capture()).mean(axis=2), (1, 1))
    # Project white and sample the bright baseline.
    on = 255 * numpy.ones((self.H, self.W, 3), dtype=numpy.uint8)
    cv2.imshow("projector", on)
    cv2.waitKey(100)
    # NOTE(review): waitKey(100) is called twice here — possibly extra settle
    # time for the bright frame; confirm before removing.
    cv2.waitKey(100)
    self.maximums = cv2.blur(numpy.asarray(capture()).mean(axis=2), (1, 1))
    # The lit region is where bright and dark frames differ.
    self.points = get_diff_rect(self.minimums, self.maximums)
    self.draw_points()
    self.calibrated = True
def test_print_if():
    """P.Print works both before and inside a taken `if` branch."""
    class Print_If(Cell):
        def __init__(self):
            super().__init__()
            self.print = P.Print()

        def construct(self, x, y):
            self.print("input_x before:", x, "input_y before:", y)
            if x < y:
                self.print("input_x after:", x, "input_y after:", y)
                x = x + 1
            return x

    cap = Capture()
    with capture(cap):
        input_x = Tensor(3, dtype=ms.int32)
        input_y = Tensor(4, dtype=ms.int32)
        expect = Tensor(4, dtype=ms.int32)
        net = Print_If()
        out = net(input_x, input_y)
        # give the async Print op time to flush before checking output
        time.sleep(0.1)
        np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())

    patterns = {
        'input_x before:\nTensor(shape=[], dtype=Int32, value=3)\n'
        'input_y before:\nTensor(shape=[], dtype=Int32, value=4)',
        'input_x after:\nTensor(shape=[], dtype=Int32, value=3)\n'
        'input_y after:\nTensor(shape=[], dtype=Int32, value=4)'
    }
    check_output(cap.output, patterns)
def refresh(self):
    """Refresh the playblast preview"""
    frame = cmds.currentTime(query=True)
    # When playblasting outside of an undo queue it seems that undoing
    # actually triggers a reset to frame 0. As such we sneak in the current
    # time into the undo queue to enforce correct undoing.
    cmds.currentTime(frame, update=True)
    with lib.no_undo():
        options = self.options_getter()
        # Render a single-frame jpg into a throwaway temp directory.
        tempdir = tempfile.mkdtemp()
        # override settings that are constants for the preview
        options = options.copy()
        options['complete_filename'] = os.path.join(tempdir, "temp.jpg")
        options['width'] = self.preview_width
        options['height'] = self.preview_height
        options['viewer'] = False
        options['frame'] = frame
        options['off_screen'] = True
        options['format'] = "image"
        options['compression'] = "jpg"
        options['sound'] = None
        fname = capture.capture(**options)
        if not fname:
            log.warning("Preview failed")
            return
        # Display the rendered frame, then remove the temp file.
        image = QtGui.QPixmap(fname)
        self.preview.setPixmap(image)
        os.remove(fname)
def capture_scene(options):
    """Playblast using the scene's own view settings.

    The "panel" key (internal to capture_gui) is stripped and `viewer` is
    forced off so the caller can open the viewer itself after playblasting.

    Arguments:
        options (dict): a collection of output options

    Returns:
        str: Full path to playblast file.
    """
    filename = options.get("filename", "%TEMP%")
    log.info("Capturing to: {0}".format(filename))

    # Work on a copy so the caller's dict is never mutated.
    opts = dict(options)
    # Force viewer to False in call to capture because we have our own
    # viewer opening call to allow a signal to trigger between playblast
    # and viewer
    opts['viewer'] = False
    # Remove panel key since it's internal value to capture_gui
    opts.pop("panel", None)

    playblast = capture.capture(**opts)
    return _fix_playblast_output_path(playblast)
def indexing(filter, pFile):
    """Index every page produced by capture.capture(pt, filter) into XML + MySQL.

    NOTE(review): parameter `filter` shadows the builtin of the same name.
    """
    # Get listFile name from pFile, difference of them being only in extension name
    listFile = re.sub('\.xml','.txt',pFile) # Get listFile name from pFile
    indexname = re.findall(r'([^/]+).xml',pFile)
    indexname = indexname[0]
    # Fetch a page title from listFile one at a time
    pt = pageindex.PageTitle(listFile)
    # cat is for fetching distinct categories for pages within this index
    cat = []
    for pageElement in capture.capture(pt, filter):
        try:
            pageText = pageElement[0]
            pageTitle = pageElement[1]
            wiki = pageindex.WikiToXML(pageText, pageTitle, pFile, cat)
            xmlDoc,paralist,cat = wiki.wikitoxml()
            print xmlDoc.toxml().encode('utf-8') # Print xmlDoc to XML format output
            #print paralist
            # Save V file into hashed directory
            hashpath = save(xmlDoc, pageTitle)
            # Import DTPM parameters (paralist) into MySQL
            page = wikipedia.Page(None,title=pageTitle)
            importDTPM(paralist, indexname, hashpath, pageTitle, page)
        except (Exception), e:
            # Avoid cessation of the indexing sequence
            #print e
            #raise
            print '\nIndexation continues.\n'
            continue
def getJson(name):
    """Return the json content registered under `name`, or '' if not registered."""
    res = ''
    try:
        res = capture().re_dict[name]
    except KeyError:
        # Narrowed from a blanket `except Exception`: only a missing key is
        # the expected failure here; anything else should propagate.
        logging.exception("try to get an none exits json file")
    return res
def test_print_assign_add():
    """P.Print observes a Parameter both before and after it is reassigned."""
    class Print_Assign_Add(Cell):
        def __init__(self):
            super().__init__()
            self.print = P.Print()
            self.add = P.Add()
            self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')

        def construct(self, x, y):
            self.print("before:", self.para)
            self.para = x
            self.print("after:", self.para)
            x = self.add(self.para, y)
            return x

    cap = Capture()
    with capture(cap):
        input_x = Tensor(3, dtype=ms.int32)
        input_y = Tensor(4, dtype=ms.int32)
        expect = Tensor(7, dtype=ms.int32)
        net = Print_Assign_Add()
        out = net(input_x, input_y)
        # give the async Print op time to flush before checking output
        time.sleep(0.1)
        np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())

    patterns = {
        'before:\nTensor(shape=[], dtype=Int32, value=1)',
        'after:\nTensor(shape=[], dtype=Int32, value=3)'
    }
    check_output(cap.output, patterns)
def image_preview(self, shutter_speed=None, resolution=None, light_color="blue", auto_exposure=False):
    """Grab one frame for previewing, defaulting to this instance's settings.

    Any argument left as None falls back to the corresponding attribute.
    """
    params = {
        "shutter_speed": shutter_speed or self.shutter_speed,
        "resolution": resolution or self.resolution,
        "light_color": light_color,
        "auto_exposure": auto_exposure,
    }
    return capture.capture(**params)
def test_preset():
    """A preset dict expands cleanly into capture() keyword options."""
    preset = dict(
        width=320,
        height=240,
        camera_options={"displayGateMask": False},
        viewport_options={"wireframeOnShaded": True},
        display_options={"displayGateMask": False},
    )
    capture.capture(**preset)
def process(self, instance):
    """Playblast the instance's camera to the collection's base path.

    Assumes instance.data["collection"] is a clique-style collection and
    instance[0] is a camera node — TODO confirm against the plugin's family.
    """
    # Derive the output path by stripping the formatted "{tail}" suffix
    # from the collection's first member.
    filename = list(instance.data["collection"])[0]
    filename = filename.replace(
        instance.data["collection"].format("{tail}"), "")
    capture(
        instance[0].getTransform().name(),
        filename=filename,
        viewer=False,
        show_ornaments=False,
        overwrite=True,
        off_screen=True,
        # Viewport 2.0 with 8x multisampling; pan/zoom disabled for
        # a reproducible framing.
        viewport_options={"rendererName": "vp2Renderer"},
        viewport2_options={
            "multiSampleEnable": True,
            "multiSampleCount": 8
        },
        camera_options={"panZoomEnabled": False},
    )
def capture_helper(image_path="./image.jpg"):
    """Capture an image, POST it to the analyze service, and cache the result.

    Updates the module-level `cur_emotion` when the response reports happiness.
    """
    print("capture_helper")
    anaylze_url = "http://" + conf['anaylze_ip'] + ":" + conf[
        'anaylze_port'] + "/analyze"
    capture(image_path)
    # `with` guarantees the handle is closed even if read() raises; the
    # original opened/closed manually and would leak on error.
    # NOTE(review): opened in text mode — for a jpg payload 'rb' is likely
    # intended; confirm against the analyze service.
    with open(image_path) as image:
        data = image.read()
    print("request anaylze")
    res = requests.post(url=anaylze_url,
                        data=data,
                        headers={'Content-Type': 'application/octet-stream'})
    res.raise_for_status()
    j = res.json()
    print("res:" + str(j))
    global cur_emotion
    if len(j) > 0 and "happiness" in j:
        cur_emotion = j
def image_preview(self, shutter_speed=None, resolution=None, light_color='blue', auto_exposure=False):
    """Return a single preview frame using instance defaults for unset args."""
    effective_speed = shutter_speed or self.shutter_speed
    effective_resolution = resolution or self.resolution
    return capture.capture(
        shutter_speed=effective_speed,
        resolution=effective_resolution,
        light_color=light_color,
        auto_exposure=auto_exposure,
    )
def init(COM_PORT):
    """Sweep the turret left/middle/right, photographing and classifying each view.

    Opens the serial port, performs the L/M/R scan, re-centers on 'M', and
    returns {'L': ..., 'M': ..., 'R': ...} with the processed result per side.
    """
    ser = serial.Serial(COM_PORT, 9600)
    time.sleep(4)  # let the device settle after opening the port

    def _scan(side, image):
        # One step of the repeated command/ack/capture/classify sequence
        # (was copy-pasted three times in the original).
        ser.write(side.encode())
        ser.read_until(side.encode())
        capture.capture('COM2', image)
        return PROCESS.process(image + '.bmp')

    sl = _scan('L', 'l')
    sm = _scan('M', 'm')
    sr = _scan('R', 'r')
    # Return to the middle position before closing.
    ser.write(b'M')
    ser.read_until('M'.encode())
    ser.close()
    # data = {
    #     'L': extract_data('l.bmp'),
    #     'M': extract_data('m.bmp'),
    #     'R': extract_data('r.bmp'),
    # }
    return {'L': sl, 'M': sm, 'R': sr}  # , 'data': data }
def scanFor(COM_PORT, dest):
    """Sweep L/M/R until a view classifies as `dest`; return its extracted data.

    Returns 'No pic!' when no position matches.
    """
    ser = serial.Serial(COM_PORT, 9600)
    time.sleep(4)  # let the device settle after opening the port
    # Left position
    ser.write(b'L')
    ser.read_until('L'.encode())
    capture.capture('COM2', 'l')
    sl = PROCESS.process('l.bmp')
    if sl == dest:
        # re-center before returning
        ser.write(b'M')
        ser.read_until('M'.encode())
        ser.close()
        return extract_data('l.bmp')
    # Middle position
    ser.write(b'M')
    ser.read_until('M'.encode())
    capture.capture('COM2', 'm')
    sm = PROCESS.process('m.bmp')
    if sm == dest:
        ser.write(b'M')
        ser.read_until('M'.encode())
        ser.close()
        return extract_data('m.bmp')
    # Right position
    ser.write(b'R')
    ser.read_until('R'.encode())
    capture.capture('COM2', 'r')
    sr = PROCESS.process('r.bmp')
    # NOTE(review): 'M' is written here AND again inside the branch below,
    # so a right-side match re-centers twice — confirm whether intentional.
    ser.write(b'M')
    ser.read_until('M'.encode())
    if sr == dest:
        ser.write(b'M')
        ser.read_until('M'.encode())
        ser.close()
        return extract_data('r.bmp')
    ser.close()
    return 'No pic!'
class Face_Detector:
    # NOTE(review): this entire body executes at class-definition (import)
    # time — it prompts the user interactively the moment the module loads.
    # Consider moving it into a function or an `if __name__ == '__main__'`
    # guard; left unchanged here.
    print("Hello,\nWelcome to Face detection Machine\n")
    print("Choose any one :")
    print("1. Detect Faces \n2. Register Faces\n")
    # 1 = detect existing faces, 2 = register a new face.
    start = int(input())
    if start == 2:
        name = input("Enter your first name\n")
        capture(name)
    if start == 1:
        detect()
def process(records):
    """Parse a json-encoded list of records into {key_ID: content-dict}.

    Args:
        records: json string holding a list of raw record payloads.

    Returns:
        dict mapping each record's key_ID to its parsed content dict.
    """
    records = json.loads(records)
    res = dict()
    for record in records:
        # (removed: an unused `temp_dict` local and commented-out debug
        # file writes from the original)
        key_ID, contents = tools.record_parse(record)
        capture_content = capture().parse_re(contents)
        res[key_ID] = tools.content2dict(capture_content)
    return res
def getevent(driver, event, element, string):
    """Dispatch one named UI action against a selenium driver/element.

    Unknown event names print an error message instead of raising.
    """
    actions = {
        "click": lambda: element.click(),
        "sendkeys": lambda: element.send_keys(string),
        "infarme": lambda: driver.switch_to.frame(element),
        "clear": lambda: element.clear(),
        "select": lambda: Select(element).select_by_visible_text(string),
        "quit": lambda: driver.quit(),
        "sleep": lambda: time.sleep(int(string)),
        "outfarme": lambda: driver.switch_to.default_content(),
        "screenshot": lambda: capture(driver, "D:/" + str(string) + ".png"),
        "refresh": lambda: driver.refresh(),
    }
    handler = actions.get(event)
    if handler is not None:
        handler()
    else:
        print("操作方式输入错误,请按规范输入!" + str(event))
def indexing():
    """Convert each page yielded by capture.capture(pt) to XML and save it.

    Relies on module-level `pt` and `xmlFilename` — TODO confirm where set.
    """
    for pageElement in capture.capture(pt):
        try:
            pageText = pageElement[0]
            pageTitle = pageElement[1]
            wiki = pageindex.WikiToXML(pageText, pageTitle, xmlFilename)
            xmlDoc,paralist = wiki.wikitoxml()
            print xmlDoc.toxml().encode('utf-8') # print xmlDoc to XML format output, encode to utf-8 on Ubuntu
            #print paralist
            save(xmlDoc, pageTitle)
        except Exception, e:
            # avoid cessation of the indexing sequence
            #print e
            print '\nIndexation continues.\n'
            continue
def main(self, args):
    """Attach a device-I/O capture logger to every currently running process."""
    # Set working directory to controller directory
    os.chdir(os.path.join(os.path.dirname(__file__), ".."))
    try:
        # CSV-style capture log with one row per observed device event.
        self.logger = capture.capture("capture.log",["timestamp","type","process_name","pid","device_name", "device_h", "data_base64"])
        # Attach to all processes
        processes = self.CEngine.GetAllProcesses()
        for process in processes:
            # Attach
            self.attach_new_process(process.ProcessName, process.Id, process.Handle)
        return
    except Exception,e:
        logging.exception("Controller main loop unhandled exception.")
def indexing():
    """Convert each page yielded by capture.capture(pt) to XML and save it.

    (Near-duplicate of the other `indexing`; relies on module-level `pt`
    and `xmlFilename` — TODO confirm where set.)
    """
    for pageElement in capture.capture(pt):
        try:
            pageText = pageElement[0]
            pageTitle = pageElement[1]
            wiki = pageindex.WikiToXML(pageText, pageTitle, xmlFilename)
            xmlDoc, paralist = wiki.wikitoxml()
            print xmlDoc.toxml().encode(
                'utf-8'
            )  # print xmlDoc to XML format output, encode to utf-8 on Ubuntu
            #print paralist
            save(xmlDoc, pageTitle)
        except Exception, e:
            # avoid cessation of the indexing sequence
            #print e
            print '\nIndexation continues.\n'
            continue
def get_imgs_page(page, session, cw=None, try_=1):
    """Return the Image list for one manga page, preferring the legacy path.

    Falls back to capturing raw jpgs and writing them as temp files when the
    legacy fetch raises SeedError.  Looks machine-decompiled — the `if True:`
    wrapper and parenthesised idioms are kept as-is.
    """
    print('##### get_imgs_page', try_)
    print_ = get_print(cw)
    if cw is not None and not cw.alive:
        return
    if True:
        try:
            imgs = get_imgs_page_legacy(page, session, cw)
            if imgs == 'seed':
                raise SeedError()
            return imgs
        except Exception as e:
            # only SeedError falls through to the capture path below
            if not isinstance(e, SeedError):
                raise
    jpgs = capture(page, session, cw, ignore_error=try_ > 1)
    imgs = []
    rand = Random()
    rand.seed((u'{}{}').format(page.title, clock()))
    # NOTE(review): `hash` shadows the builtin
    hash = uuid()
    DIR = get_outdir('manamoa')
    for p, jpg in enumerate(jpgs):
        if isinstance(jpg, Empty):
            # placeholder entry: reference by url, nothing to write
            img = Image(jpg.url, page, p)
            imgs.append(img)
            continue
        img_tmp = os.path.join(DIR, ('tmp{}_{:04}.jpg').format(hash, p))
        if cw is not None:
            cw.trash_can.append(img_tmp)
        if not os.path.isdir(DIR):
            try:
                os.makedirs(DIR)
            except:
                pass
        with open(img_tmp, 'wb') as (f):
            f.write(jpg)
        img = Image(u'tmp://' + img_tmp, page, p)
        imgs.append(img)
    return imgs
def start():
    """Start packet capture, configured either from config file or argv.

    argv form: start.py <network_card> <port> <copy_ip> [copy_port]
    With fewer than 4 argv entries, all values come from the "config" section.
    """
    if (len(sys.argv) < 4):
        # Config-file path: every value is mandatory except copy_port.
        capture_nc = capture_config.getConfig("config", 'nc')
        if (not capture_nc):
            print "error not have network card info"
            capture_log.log_error("error not have network card info")
            sys.exit(1)
        capture_port = capture_config.getConfig("config", 'capture_port')
        if (not capture_port):
            print "error not have capture port info"
            capture_log.log_error("error not have capture port info")
            sys.exit(1)
        copy_ip = capture_config.getConfig("config", 'copy_ip')
        if (not copy_ip):
            print "error not have copyserver ip info"
            capture_log.log_error("error not have copyserver ip info")
            sys.exit(1)
        print "Start capture port: %s network card: %s copy ip: %s" % (
            capture_port, capture_nc, copy_ip)
        capture_log.log_info(
            "Start capture port: %s network card: %s copy ip: %s" %
            (capture_port, capture_nc, copy_ip))
        copy_port = capture_config.getConfig("config", 'copy_port')
        if (copy_port):
            capture.capture(capture_nc, int(capture_port), copy_ip, int(copy_port))
        else:
            capture.capture(capture_nc, int(capture_port), copy_ip)
    else:
        # Command-line path.  NOTE(review): unlike the config path, copy_port
        # is passed as a string here (no int()) — confirm capture() accepts it.
        capture_nc = sys.argv[1]
        capture_port = int(sys.argv[2])
        copy_ip = sys.argv[3]
        print "Start capture port: %s network card: %s copy ip: %s" % (
            capture_port, capture_nc, copy_ip)
        if (len(sys.argv) == 5):
            copy_port = sys.argv[4]
            capture.capture(capture_nc, capture_port, copy_ip, copy_port)
        else:
            capture.capture(capture_nc, capture_port, copy_ip)
def test_print_assign_while():
    """P.Print tracks a Parameter reassigned on each `while` iteration."""
    class Print_Assign_While(Cell):
        def __init__(self):
            super().__init__()
            self.print = P.Print()
            self.para = Parameter(Tensor(0, dtype=ms.int32), name='para')

        def construct(self, x, y):
            self.print("input_x before:", x, "input_y before:", y,
                       "para before:", self.para)
            while x < y:
                self.para = x
                x = self.para + 1
                self.print("input_x after:", x, "input_y after:", y,
                           "para after:", self.para)
            return x

    cap = Capture()
    with capture(cap):
        input_x = Tensor(1, dtype=ms.int32)
        input_y = Tensor(4, dtype=ms.int32)
        expect = Tensor(4, dtype=ms.int32)
        net = Print_Assign_While()
        out = net(input_x, input_y)
        # give the async Print op time to flush before checking output
        time.sleep(0.1)
        np.testing.assert_array_equal(out.asnumpy(), expect.asnumpy())

    patterns = {
        'input_x before:\nTensor(shape=[], dtype=Int32, value=1)\n'
        'input_y before:\nTensor(shape=[], dtype=Int32, value=4)\n'
        'para before:\nTensor(shape=[], dtype=Int32, value=0)',
        'input_x after:\nTensor(shape=[], dtype=Int32, value=2)\n'
        'input_y after:\nTensor(shape=[], dtype=Int32, value=4)\n'
        'para after:\nTensor(shape=[], dtype=Int32, value=1)',
        'input_x after:\nTensor(shape=[], dtype=Int32, value=3)\n'
        'input_y after:\nTensor(shape=[], dtype=Int32, value=4)\n'
        'para after:\nTensor(shape=[], dtype=Int32, value=2)',
        'input_x after:\nTensor(shape=[], dtype=Int32, value=4)\n'
        'input_y after:\nTensor(shape=[], dtype=Int32, value=4)\n'
        'para after:\nTensor(shape=[], dtype=Int32, value=3)'
    }
    check_output(cap.output, patterns)
def search():
    """Resolve the submitted url, screenshot it, and render the index page.

    On any failure the error text is shown instead of a result/image.
    """
    url = request.form['url']
    image_str = None
    error = None
    result = None
    try:
        original_url = get_original_link(url)
        screenshot = capture(original_url)
        # encode the screenshot as base64 png for inline display
        buf = BytesIO()
        screenshot.save(buf, format='png')
        image_str = base64.b64encode(buf.getvalue()).decode('utf-8')
        result = original_url
    except Exception as e:
        LOGGER.error(traceback.format_exc())
        error = str(e)
    return render_template('index.html', error=error, result=result, image=image_str)
def iterate(self):
    """One frame of the capture → transform → render loop.

    Initialises the corner points from the first frame, rescales the capture
    to 800px wide, applies the perspective transform, optionally records the
    transformed frame, then redraws both windows.
    """
    frame = capture()
    self.capt_orig = numpy.asarray(frame)
    if not self.points:
        # default the quad to the full captured frame
        CH, CW, _depth = self.capt_orig.shape
        self.points = [(0, 0), (CW, 0), (CW, CH), (0, CH)]
    self.scale = float(self.capt_orig.shape[1]) / 800
    aspect = float(self.capt_orig.shape[1]) / self.capt_orig.shape[0]
    self.capt_scale = cv2.resize(self.capt_orig, (800, int(800 / aspect)))
    self.transform()
    if self.recording:
        snapshot = Image.fromarray(self.transformed, mode='RGB')
        snapshot.save('rec-%06d.png' % (self.idx))
        self.idx += 1
    self.render_camera()
    self.render_projector()
def capture_blue(self):
    """Capture a blue-lit frame at this instance's exposure settings."""
    # return capture.capture_auto_exposure(initial_shutter_speed=self.shutter_speed, resolution=_contrast_image_resolution, light_color='blue')
    return capture.capture(
        shutter_speed=self.shutter_speed,
        resolution=self.resolution,
        light_color="blue",
    )
def capture_green(self):
    """Capture a green-lit contrast frame at the module-level contrast settings."""
    # return capture.capture_auto_exposure(initial_shutter_speed=self.shutter_speed, resolution=_contrast_image_resolution, light_color='green')
    return capture.capture(shutter_speed=_contrast_image_exposure_time,
                           resolution=_contrast_image_resolution,
                           light_color="green")
# Sweep exposure times and record min/mean/max pixel values per exposure,
# to characterise the camera's response curve under blue light.
_light_color = 'blue'
_resolution = (2592,1944)
max_exposure = 20 # milliseconds
_exposure_times = np.linspace(0, max_exposure, 100) # time in ms
#_exposure_times = np.logspace(0, 200, 20) # time in ms
# 80% of uint16 full scale; presumably the saturation threshold used later
# in this script — TODO confirm.
saturation_value = np.iinfo(np.uint16).max*0.8
_avg_values = list()
_min_values = list()
_max_values = list()
for expTime in _exposure_times:
    print('exposure time: %f ms'%expTime)
    # shutter_speed is in microseconds, hence the *1000 from ms
    img = capture.capture(shutter_speed=expTime*1000, light_color=_light_color, resolution=_resolution)
    # NOTE(review): `min` and `max` shadow the builtins; kept unchanged in
    # case later (unseen) script lines rely on them.
    min = np.min(img)
    avg = np.mean(img)
    max = np.max(img)
    _avg_values.append(avg)
    _min_values.append(min)
    _max_values.append(max)
_avg_values = np.array(_avg_values)
_min_values = np.array(_min_values)
_max_values = np.array(_max_values)
# save values
save = {'exposure_times': _exposure_times, 'mean': _avg_values, 'min': _min_values, 'max': _max_values}
# used for testing if __name__ == '__main__': from capture import capture # Get the filename form the arguments ap = argparse.ArgumentParser() ap.add_argument("-i", "--image", required = False, help = "Path to the image to be scanned") args = vars(ap.parse_args()) if(args["image"] is None): print("usage: measure.py -i [image]") print("using capture.py for image aquisition") image = capture(shutter_speed=200000) else: if(not os.path.isfile(args["image"])): print("not a file") exit() # read image as grayscale image = imread(args["image"], as_grey=True) #image = rescale(image, scale=0.5) # remove noise #image = gaussian_filter(image, 1) inspectionMask = np.zeros(image.shape) spot_size=image.shape[0] * 0.05
def test_viewport2_options():
    """Passing viewport2_options through capture() succeeds."""
    vp2 = {"ssaoEnable": True}
    capture.capture(viewport2_options=vp2)
def captureScreen():
    """Write a screenshot to the configured SCREEN_CAPTURE_PATH."""
    capture.capture(SCREEN_CAPTURE_PATH)
# Brightness sweep: show progressively brighter frames in a window and save
# a camera capture of each, for measuring the display/camera response.
from capture import captureJPG as capture
import numpy
import cv2
import time

W,H = (1920,1200)
cv2.namedWindow("brute")
out = numpy.ones((H,W,3), numpy.uint8)
cv2.imshow("brute", out)
cv2.waitKey(5)
# NOTE(review): three argument-less waitKey() calls each block until a key is
# pressed — presumably a manual "ready" gate before the sweep; confirm.
cv2.waitKey()
cv2.waitKey()
cv2.waitKey()
for i in range(0,255,5):
    # uniform gray level i (out is all-ones, so out * i == i everywhere)
    show = out * i
    cv2.imshow("brute", show)
    cv2.waitKey(5)
    im = capture()
    im.save('brute-%d.png' % (i))
    cv2.waitKey(5)
def test_viewport_options():
    """Passing viewport_options through capture() succeeds."""
    vp = {"wireframeOnShaded": True}
    capture.capture(viewport_options=vp)
# do routering table. # OBS: Implica que o NWK_SRC tem como vizinhos todos esses nos. # ****************************************************************************************** import pyshark import capture import json import lib.geoPositioning # PCAP_FILE = 'pcap_files/bigger_file.PCAP' PCAP_FILE = 'pcap_files/smaller_file.PCAP' cap = capture.capture() nodes = cap.fileCapture(PCAP_FILE) # f = file('nodes.log', 'w') f = "/home/samuel/TCC/docs/geo_positions.csv" tot_in = 0 tot_out = 0 tot_pkt = 0 geo = lib.geoPositioning.geoPositioning(f) print "Following nodes has been processed:" for node in nodes: """Node is a node object""" print "Processing nodes"
def test_camera_options():
    """Passing camera_options through capture() succeeds."""
    cam = {"displayGateMask": False}
    capture.capture(camera_options=cam)
def test_capture():
    """capture() runs with no arguments at all."""
    capture.capture()
def test_parse_view():
    """Options parsed from a named panel can be fed back into capture()."""
    parsed = capture.parse_view("modelPanel1")
    capture.capture(**parsed)
def main(self, args):
    """Fuzzing controller: measure device-I/O events, report, then attack.

    Phase 1 attaches a measuring process to collect (target, event, blocks)
    tuples; phase 2 loops forever spawning attack processes per event.
    Pass "-printonly" in args to only observe (long sleep, no attack).
    """
    # Set working directory to controller directory
    os.chdir(os.path.join(os.path.dirname(__file__), ".."))
    try:
        printOnly = False
        for arg in args:
            if str(arg) == "-printonly":
                printOnly = True
        # Select a random seed
        seed = int(random.random()*1000000)
        self.generator = random.Random()
        self.generator.seed(seed)
        print "Controller is using seed value of %i. Set this value manually to reproduce this attack." % seed
        # Perform an initial measurement to gather data for an organized attack
        logger = capture.capture("capture.log",["timestamp","type","process_name","pid","device_name", "device_h", "data_base64"])
        mBreakpoint = BreakpointMeasurement()
        mProcess = ProcessDeviceIo(self, "C:\\Crash\\", mBreakpoint, -1, -1, 0, printOnly, logger )
        self.CEngine.AttachProcess(mProcess)
        # Observe for 10s normally; effectively forever in print-only mode.
        if printOnly:
            sleep(10000)
        else:
            sleep(10)
        mProcess.stop()
        measurements = mBreakpoint.measurement
        # Generate the measurement report
        report_sum_fuzz_blocks = 0
        report_events_by_target = {}
        report_blocks_by_target = {}
        for measurement in measurements:
            # [[target_name, event_name, len(fuzz_blocks)]]
            report_sum_fuzz_blocks += measurement[2]
            if measurement[1] not in report_events_by_target:
                report_events_by_target[measurement[1]] = 1
            else:
                report_events_by_target[measurement[1]] += 1
            if measurement[1] not in report_blocks_by_target:
                report_blocks_by_target[measurement[1]] = measurement[2]
            else:
                report_blocks_by_target[measurement[1]] += measurement[2]
        # Print report and wait for input to attack
        print "CONTROLLER MEASUREMENT: Attack will consist of %i attacked events corresponding to %i fuzzed blocks.\n" % (len(measurements), report_sum_fuzz_blocks)
        print "Number of events being attacked by name:"
        for name, count in sorted(report_events_by_target.iteritems(), key=operator.itemgetter(1), reverse=True):
            print "%i\t%s" % (count, name)
        print "\nNumber of attacked blocks being attacked by name:"
        for name, count in sorted(report_blocks_by_target.iteritems(), key=operator.itemgetter(1), reverse=True):
            print "%i\t%s" % (count, name)
        print "\n\n"
        print "ALERT: Fuzzing device communication may result in corrupting files or other damage depending on usage."
        raw_input("Press any key to begin attack. I understand this has the potential to cause damage.\n")
        # Run the auto-it script that will press "Cancel" on the watson crash dump. Unfortunately this has to be clicked
        # before the process is terminated in order for a proper crashdump to be created it seems.
        subprocess.Popen(['autoit3.exe', os.path.join(os.path.dirname(__file__), "..", "autoit", "watson_cancel.au3"), ">nul"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Persist the same report to log.txt for later reproduction.
        f = open("log.txt","wb")
        f.write(str(datetime.datetime.now()) + "\r\n")
        f.write("controller using seed %i\r\n" % seed)
        f.write("Number of events being attacked by name:\r\n")
        for name, count in sorted(report_events_by_target.iteritems(), key=operator.itemgetter(1), reverse=True):
            f.write("%i\t%s\r\n" % (count, name))
        print "\nNumber of attacked blocks being attacked by name:"
        for name, count in sorted(report_blocks_by_target.iteritems(), key=operator.itemgetter(1), reverse=True):
            f.write("%i\t%s\r\n" % (count, name))
        f.flush()
        f.close()
        # Attack each fuzz block sequentially - each with it's own attack instance
        print "CONTROLLER: Beginning attacks"
        times = []
        processes = []
        terminate_counts = []
        fault_pause = 180
        self.last_fault = time.time()-120000
        max_runtime = 10
        num_processes = 20
        unique_identifier = 1
        event_positions = {}
        logger = capture.capture_empty()
        while True:
            for j in range(len(report_events_by_target)):
                # Attack this occurrence of this event
                while len(processes) >= num_processes:
                    #sleep(0.5)
                    sleep(0.1)
                    # Cleanup old processes
                    for k in range(len(processes)):
                        #if processes[k].attack_count > terminate_counts[k] or processes[k].terminated or int(time() - times[k]) > max_runtime:
                        if int(time.time() - self.last_fault) > fault_pause and ( processes[k].terminated or int(time.time() - times[k]) > max_runtime ):
                            processes[k].stop()
                            del processes[k]
                            del times[k]
                            del terminate_counts[k]
                            break
                # Round-robin through each event's occurrences.
                event_name = report_events_by_target.keys()[j]
                attack_index_max = report_events_by_target[event_name]
                attack_index_current = 0
                if event_name in event_positions:
                    attack_index_current = event_positions[event_name]
                    event_positions[event_name] = (event_positions[event_name] + 1) % attack_index_max
                else:
                    event_positions[event_name] = 0
                # Create a new process
                breakpointSeed = self.generator.randint(1,10000000)
                newBreakpoint = BreakpointAttackSequentially(5, event_positions[event_name], event_name, breakpointSeed ) # 5% of data will be attacked
                newProcess = ProcessDeviceIo(self, "C:\\Crash\\", newBreakpoint, -1, -1, unique_identifier, False, logger )
                self.CEngine.AttachProcess(newProcess)
                unique_identifier+=1
                terminate_counts.append( event_positions[event_name] )
                times.append( time.time() )
                processes.append(newProcess)
    except Exception,e:
        logging.exception("Controller main loop unhandled exception.")
def test_display_options():
    """Passing display_options through capture() succeeds."""
    disp = {"displayGradient": False}
    capture.capture(display_options=disp)