def main():
    # CLI entry point: defines "scan", "depresolve" and "graph" subcommands
    # and dispatches on the one chosen.
    parser = argparse.ArgumentParser(description="")
    subparsers = parser.add_subparsers(help="", dest="subcmd")
    scanparser = subparsers.add_parser("scan", help="")
    scanparser.add_argument('--repo', default="")
    scanparser.add_argument('--subdir', default=".")
    depresolveparser = subparsers.add_parser("depresolve", help="")
    graphparser = subparsers.add_parser("graph", help="")
    # NOTE(review): action='store_true' combined with default=True makes
    # --verbose a no-op (args.verbose is always True) -- confirm intent.
    graphparser.add_argument('--verbose', help='verbose', action='store_true', default=True)
    graphparser.add_argument('--debug', help='debug', action='store_true', default=False)
    graphparser.add_argument('--quiet', help='quiet', action='store_true', default=False)
    graphparser.add_argument('--unit-file', help="debugging purposes", default=None)
    args = parser.parse_args()
    # Only "scan" is dispatched here; --repo/--subdir are parsed but not
    # passed along (scan receives the current working directory).
    if args.subcmd == "scan":
        scan(os.getcwd())
def reply_to_tweet(self, tweet, prefix):
    # Reply to *tweet* with *prefix* as text plus a scanned GIF rendered
    # from the tweet's video; bails out when no gif video URL is found.
    video_url = get_gif_video_url_climbing(self.api, tweet)
    if video_url is None:
        self.log(
            "Couldn't find a gif video URL for {}"
            .format(self._tweet_url(tweet)))
        return
    text = prefix
    # The temp dir is kept when DEBUG is set, for post-mortem inspection.
    with tmpdir(delete=not DEBUG) as base_dir:
        filename = os.path.join(base_dir, 'scanned.gif').encode('utf-8')
        scan(video_url, base_dir, filename)
        if self._is_silent():
            # Dry-run mode: log what would have been posted, post nothing.
            self.log(
                "Silent mode is on. Would've responded to {} with '{} {}'"
                .format(self._tweet_url(tweet), text, filename))
            return
        self.post_tweet(
            text,
            reply_to=tweet,
            media=filename,
        )
    self.update_reply_threshold(tweet, prefix)
def _run(self):
    """Probe the portquiz host for reachable egress ports, common ones first."""
    priority_ports = [80, 443, 8080, 53, 5222, 25, 110, 465]
    try:
        target = socket.gethostbyname(self.PORTQUIZ_HOSTNAME)
    except socket.gaierror:
        # DNS failed; fall back to the hard-coded address.
        target = self.PORTQUIZ_ADDR
    logger.debug('Scan most important. IP: %s', target)
    scan.scan(
        [target], priority_ports,
        timeout=self.connect_timeout, abort=self.abort,
        on_open_port=self._on_open_port, pass_socket=True)
    logger.debug('Scan other ports')
    if len(self.available) < self.amount:
        # Not enough open ports yet: try the remaining top-1000 in random order.
        remaining = [port for port in scan.TOP1000 if port not in priority_ports]
        random.shuffle(remaining)
        scan.scan(
            [target], remaining,
            timeout=self.connect_timeout, abort=self.abort,
            on_open_port=self._on_open_port, pass_socket=True)
    logger.debug('Done. Found %d ports', len(self.available))
def main():
    """Scan every website listed in the CSV's 'Website' column."""
    df = read_csv_file()
    # .values yields a numpy array, which iterates faster than the Series.
    for site in df['Website'].values:
        scan(df, site)
    return None
def load(proj_n):
    """Load project *proj_n* from the DB, show the menu, run the chosen action."""
    global current
    c = conn.cursor()
    c.execute("SELECT * FROM project;")
    rows = c.fetchall()
    current = rows[int(proj_n)]
    print_logo()
    print("load project")
    print("------------------------------------------")
    print("project: " + current[1] + " loaded")
    print("")
    print("options: ")
    print("")
    print("0 : scan all")
    print("1 : resume")
    print("2 : nmap only")
    print("3 : create report")
    print("------------------------------------------")
    i_nr = input("option: ")  # choose one option
    if i_nr == "0":  # scan all
        nmap_hack.run_nmap()
        scan.scan()
    elif i_nr == "1":  # resume
        scan.scan()
    elif i_nr == "2":  # nmap only
        nmap_hack.run_nmap()
    elif i_nr == "3":  # create report
        report.create(current[1])
    else:
        # BUG FIX: the original `elif (i_nr != "0" or "1" or "2" or "3")`
        # was always true because the non-empty string literals are truthy;
        # a plain else expresses the intended "anything else" branch.
        print("enter a valid value!")
def savegame_start_scan():
    # Populate the two listboxes with savegames found in the local
    # "savegames" folder and in the My Summer Car AppData directory,
    # recording each entry's path in the global `pathes` dict.
    storage = []
    storage = scan.scan("savegames")
    for items in storage:
        log_post("Found savegame: " + items)
        listbox_right.insert(0, items)
        path = os.path.join("savegames" , items)
        properties = {"path" : path}
        pathes[items] = properties
    # Windows-only: the game's savegame directory under the current user.
    path = os.path.join("C:\\","Users",os.getlogin(),"AppData","LocalLow","Amistech")
    log_post("Get savegame path: " + path)
    savegames = []
    savegames = scan.scan(path)
    for items in savegames:
        log_post("Found savegame: " + items)
        # The game's own folder goes to the left listbox; everything else
        # is treated as a savegame and goes right.
        if items == "My Summer Car":
            listbox_left.insert(0, items)
        else:
            listbox_right.insert(0, items)
        path_savegame = os.path.join("C:\\","Users",os.getlogin(),"AppData","LocalLow","Amistech",items)
        properties = {"path" : path_savegame}
        pathes[items] = properties
    print(pathes)
def _run(self):
    """Scan self.ip for open ports, trying the common ones first."""
    priority = [80, 443, 8080, 53, 5222, 25, 110, 465]
    logger.debug('Scan most important. IP: %s', self.ip)
    scan.scan([self.ip], priority,
              timeout=self.connect_timeout, abort=self.abort,
              on_open_port=self._on_open_port, pass_socket=True)
    logger.debug('Scan other ports')
    if len(self.available) < self.amount:
        # Still short of the requested amount: shuffle the rest of the
        # top-1000 ports and keep scanning.
        rest = [port for port in scan.TOP1000 if port not in priority]
        random.shuffle(rest)
        scan.scan([self.ip], rest,
                  timeout=self.connect_timeout, abort=self.abort,
                  on_open_port=self._on_open_port, pass_socket=True)
    logger.debug('Done. Found %d ports', len(self.available))
def do_magic(target='', targets=None, need_crawl=False, same_domain=True,
             what_to_scan=None):
    """
    scan controller

    :param target: single URL to scan ('' to skip)
    :param targets: list of URLs to scan
    :param need_crawl: crawl each target before scanning when True
    :param same_domain: restrict crawling to the target's own domain
    :param what_to_scan: list of checks to run
    :return: vulns from the last scan performed
    """
    # BUG FIX: the original used mutable default arguments ([]), which are
    # shared across calls; None plus local initialisation is the safe
    # equivalent and keeps the call signature backward-compatible.
    targets = [] if targets is None else targets
    what_to_scan = [] if what_to_scan is None else what_to_scan
    vulns = []
    if target == '' and len(targets) == 0:
        print('Check your targets')
    else:
        if target != '':
            print('Now scan ' + target)
            vulns = scan.scan(
                target, what_to_scan) if not need_crawl else scan.crawl_and_scan(
                target, what_to_scan, same_domain=same_domain)
        if len(targets) > 0:
            for url in targets:
                print('Now scan ' + url)
                # NOTE(review): vulns is overwritten each iteration, so only
                # the last URL's findings are returned -- confirm intent.
                vulns = scan.scan(
                    url, what_to_scan) if not need_crawl else scan.crawl_and_scan(
                    url, what_to_scan, same_domain=same_domain)
    return vulns
def do_scan_remote(profile, opts, gate):
    # Run a remote scan against the smoon server, then try to fetch a
    # rating; a rating failure is reported but non-fatal.
    ensure_code_reachability()
    from scan import scan, rating
    scan(profile, opts.smoonURL, gate)
    try:
        rating(profile, opts.smoonURL, gate)
    except ValueError:
        print "Could not get rating!"
def test_skip_1(self, mock_qualify):
    """A non-qualifying, not-yet-encountered article gets no reply."""
    mock_qualify.return_value = False

    scan(MOCK_SUBREDDIT)

    reply_mock = test_qualify.MOCK_SUBMISSION.reply
    reply_mock.assert_not_called()
def process(timestamp, lecture, path):
    # Scan the file at *path*, upload it, and delete the local copy when the
    # upload succeeds.
    print path
    scan.scan(path)
    if upload(timestamp, lecture, path):
        try:
            os.remove(path)
        except OSError:
            # NOTE(review): the message (Spanish: "error uploading the
            # annotation") fires when the local *delete* fails, not the
            # upload -- confirm the wording is intended.
            print 'Error al subir la anotacion'
            pass
def say_something():
    """Prompt the user and dispatch the entered text to the matching skill."""
    text = input('話しかける:')
    if text in ('ラーメンタイマー', '砂時計'):
        skill.ramen()
    elif text == '癒やして':
        skill.care()
    elif text in ('計算', '電卓'):
        skill.calculation()
    else:
        # No skill matched; hand the raw text to the scanner.
        scan.scan(text)
def main():
    # Word-frequency report generator: writes the most frequent words over
    # all / positive / negative reviews, once excluding stopwords and once
    # including them.  ("wb" mode with str writes implies Python 2.)
    # defaults are binary outcome and exclude stopwords
    binary_label = True
    exclude_stopwords = True
    # a modified scan that returns three lists
    data_nosw, data_positive_nosw, data_negative_nosw = (scan.scan('finefoods.txt', exclude_stopwords, binary_label))
    # open files for writing
    top_all_nosw = open(os.path.join(sys.path[0], "top_all_nosw.txt"), "wb")
    top_positive_nosw = open(os.path.join(sys.path[0], "top_positive_nosw.txt"), "wb")
    top_negative_nosw = open(os.path.join(sys.path[0], "top_negative_nosw.txt"), "wb")
    # join all reviews for wordcount
    all_review_nosw = ' '.join([row[0] for row in data_nosw])
    dict_all_nosw = utils.get_unigram(all_review_nosw)[0]
    # sort by most frequent words and write to file
    top_all_nosw.write('\n'.join('%s %s' % x for x in sorted(dict_all_nosw.items(), key=operator.itemgetter(1), reverse = True)))
    # join all positive reviews for wordcount
    positive_review_nosw = ' '.join([row[0] for row in data_positive_nosw])
    dict_positive_nosw = utils.get_unigram(positive_review_nosw)[0]
    top_positive_nosw.write('\n'.join('%s %s' % x for x in sorted(dict_positive_nosw.items(), key=operator.itemgetter(1), reverse = True)))
    # join all negative reviews for wordcount
    negative_review_nosw = ' '.join([row[0] for row in data_negative_nosw])
    dict_negative_nosw = utils.get_unigram(negative_review_nosw)[0]
    top_negative_nosw.write('\n'.join('%s %s' % x for x in sorted(dict_negative_nosw.items(), key=operator.itemgetter(1), reverse = True)))
    # close files
    top_all_nosw.close()
    top_positive_nosw.close()
    top_negative_nosw.close()
    # same set of routines, but this time allowing stopwords
    data_sw, data_positive_sw, data_negative_sw = scan.scan('finefoods.txt', not exclude_stopwords, binary_label)
    top_all_sw = open(os.path.join(sys.path[0], "top_all_sw.txt"), "wb")
    top_positive_sw = open(os.path.join(sys.path[0], "top_positive_sw.txt"), "wb")
    top_negative_sw = open(os.path.join(sys.path[0], "top_negative_sw.txt"), "wb")
    all_review_sw = ' '.join([row[0] for row in data_sw])
    dict_all_sw = utils.get_unigram(all_review_sw)[0]
    top_all_sw.write('\n'.join('%s %s' % x for x in sorted(dict_all_sw.items(), key=operator.itemgetter(1), reverse = True)))
    positive_review_sw = ' '.join([row[0] for row in data_positive_sw])
    dict_positive_sw = utils.get_unigram(positive_review_sw)[0]
    top_positive_sw.write('\n'.join('%s %s' % x for x in sorted(dict_positive_sw.items(), key=operator.itemgetter(1), reverse = True)))
    negative_review_sw = ' '.join([row[0] for row in data_negative_sw])
    dict_negative_sw = utils.get_unigram(negative_review_sw)[0]
    top_negative_sw.write('\n'.join('%s %s' % x for x in sorted(dict_negative_sw.items(), key=operator.itemgetter(1), reverse = True)))
    top_all_sw.close()
    top_positive_sw.close()
    top_negative_sw.close()
def _run(self):
    """Probe PORTQUIZ_ADDR for open egress ports, common ports first."""
    priority = [80, 443, 8080, 53, 5222, 25, 110, 465]
    scan.scan([self.PORTQUIZ_ADDR], priority,
              timeout=self.connect_timeout, abort=self.abort,
              on_open_port=self._on_open_port, pass_socket=True)
    if len(self.available) < self.amount:
        # Still short: scan the remaining top-1000 ports in random order.
        rest = [port for port in scan.TOP1000 if port not in priority]
        random.shuffle(rest)
        scan.scan([self.PORTQUIZ_ADDR], rest,
                  timeout=self.connect_timeout, abort=self.abort,
                  on_open_port=self._on_open_port, pass_socket=True)
def start():
    """Authenticate with Reddit, scan the configured subreddit, disconnect."""
    print("Starting main")
    client = praw.Reddit(
        client_id=config.CLIENT["ID"],
        client_secret=config.CLIENT["SECRET"],
        user_agent=config.USER_AGENT,
        username=config.USERNAME,
        password=config.PASSWORD,
    )
    scan(client.subreddit(config.SUBREDDIT))
    DatabaseManager.disconnect()
def recognize(imgname, output, desired, show_intermediate_results=False):
    """Deskew, denoise, crop and OCR *imgname*; write text to *output* and
    print accuracy against *desired*."""
    scan(imgname, show_intermediate_results)
    # Dilate the deskewed image slightly to thicken strokes before OCR.
    denoised = cv2.dilate(cv2.imread('deskewed.jpg'), np.ones((2, 2)))
    newimgname = 'no_noise.jpg'
    cv2.imwrite(newimgname, denoised)
    crop(newimgname, 'scan_res.jpg', show_intermediate_results)
    recognized_text = pytesseract.image_to_string(
        Image.open('scan_res.jpg'), config="config")
    with open(output, 'w+') as f:
        print(recognized_text, file=f)
    print('Accuracy: ' + str(test_accuracy(scan_res=output, desired=desired)))
def sendscan():
    # Validate the form inputs, run the scan, then reset every widget.
    if not deviceEntry.get():
        warningPopUp("You haven't entered a device name.")
    if not ipList:
        warningPopUp("You haven't entered any IP address.")
    else:
        # NOTE(review): bare `succesPopUp` is a no-op -- missing call
        # parentheses?  Also: an empty device name only warns above; the
        # scan still runs when ipList is non-empty -- confirm intent.
        succesPopUp
        scan(deviceEntry.get(), ipList, config_id)
        questions()
        # Clear the collected IPs and every entry widget for the next run.
        ipList[:] = []
        textentry.delete(0, 'end')
        output.delete(0, 'end')
        deviceEntry.delete(0, 'end')
def test_success(self, mock_format_comment, mock_HandlerManager, mock_qualify):
    """A qualifying article receives the formatted comment as a reply."""
    mock_qualify.return_value = True
    mock_HandlerManager.get_handler.return_value = MOCK_HANDLER
    mock_format_comment.return_value = FORMATTED_COMMENT

    scan(MOCK_SUBREDDIT)

    reply_mock = test_qualify.MOCK_SUBMISSION.reply
    reply_mock.assert_called_with(FORMATTED_COMMENT)
def test_skip_1(self, mock_DatabaseManager, mock_qualify):
    """
    A non-qualifying, not-yet-seen article gets no reply, but the skip is
    recorded in the database.
    """
    mock_qualify.return_value = False
    mock_DatabaseManager.check_id.return_value = False

    scan(mock_subreddit)

    test_qualify.mock_submission.reply.assert_not_called()
    mock_DatabaseManager.write_id.assert_called_with(
        test_qualify.MOCK_SUBMISSION_ID, DatabaseActionEnum.SKIP)
def main():
    # Word-frequency reports for fineshort.txt (with and without stopwords),
    # followed by an 80/20 train/test split of the stopword-free data.
    binary_label = True
    exclude_stopwords = True
    # scan returns (all, positive, negative) review lists.
    data_nosw, data_positive_nosw, data_negative_nosw = (scan.scan('fineshort.txt', exclude_stopwords, binary_label))
    # Output files live next to the script ("wb" + str writes -> Python 2).
    top_all_nosw = open(os.path.join(sys.path[0], "top_all_nosw.txt"), "wb")
    top_positive_nosw = open(os.path.join(sys.path[0], "top_positive_nosw.txt"), "wb")
    top_negative_nosw = open(os.path.join(sys.path[0], "top_negative_nosw.txt"), "wb")
    # Word counts over all reviews, written as "word count" lines, most
    # frequent first.
    all_review_nosw = ' '.join([row[0] for row in data_nosw])
    dict_all_nosw = utils.get_unigram(all_review_nosw)[0]
    top_all_nosw.write('\n'.join('%s %s' % x for x in sorted(dict_all_nosw.items(), key=operator.itemgetter(1), reverse = True)))
    # Same for positive-only reviews.
    positive_review_nosw = ' '.join([row[0] for row in data_positive_nosw])
    dict_positive_nosw = utils.get_unigram(positive_review_nosw)[0]
    top_positive_nosw.write('\n'.join('%s %s' % x for x in sorted(dict_positive_nosw.items(), key=operator.itemgetter(1), reverse = True)))
    # Same for negative-only reviews.
    negative_review_nosw = ' '.join([row[0] for row in data_negative_nosw])
    dict_negative_nosw = utils.get_unigram(negative_review_nosw)[0]
    top_negative_nosw.write('\n'.join('%s %s' % x for x in sorted(dict_negative_nosw.items(), key=operator.itemgetter(1), reverse = True)))
    top_all_nosw.close()
    top_positive_nosw.close()
    top_negative_nosw.close()
    # Repeat the whole procedure with stopwords kept.
    data_sw, data_positive_sw, data_negative_sw = scan.scan('fineshort.txt', not exclude_stopwords, binary_label)
    top_all_sw = open(os.path.join(sys.path[0], "top_all_sw.txt"), "wb")
    top_positive_sw = open(os.path.join(sys.path[0], "top_positive_sw.txt"), "wb")
    top_negative_sw = open(os.path.join(sys.path[0], "top_negative_sw.txt"), "wb")
    all_review_sw = ' '.join([row[0] for row in data_sw])
    dict_all_sw = utils.get_unigram(all_review_sw)[0]
    top_all_sw.write('\n'.join('%s %s' % x for x in sorted(dict_all_sw.items(), key=operator.itemgetter(1), reverse = True)))
    positive_review_sw = ' '.join([row[0] for row in data_positive_sw])
    dict_positive_sw = utils.get_unigram(positive_review_sw)[0]
    top_positive_sw.write('\n'.join('%s %s' % x for x in sorted(dict_positive_sw.items(), key=operator.itemgetter(1), reverse = True)))
    negative_review_sw = ' '.join([row[0] for row in data_negative_sw])
    dict_negative_sw = utils.get_unigram(negative_review_sw)[0]
    top_negative_sw.write('\n'.join('%s %s' % x for x in sorted(dict_negative_sw.items(), key=operator.itemgetter(1), reverse = True)))
    top_all_sw.close()
    top_positive_sw.close()
    top_negative_sw.close()
    # 80/20 split of the stopword-free data for later training/testing.
    length = len(data_nosw)
    train_data = data_nosw[:int(length*.8)]
    test_data = data_nosw[int(length*.8):]
def test_scan(self):
    # End-to-end scan test: run the scanner twice over the same data and
    # verify file counts, the recorded start time, the finish state, and
    # that the scan code changes between runs.
    cursor = db.cursor()
    now = datetime.datetime.now()
    # Matches "YYYY-MM-DD:HH:MM" for roughly the moment the test started.
    start_time_regex = r'^' + str('%04d' % now.year) + '\-' + str(
        '%02d' % now.month) + '\-' + str('%02d' % now.day) + '\:' + str(
        '%02d' % now.hour) + '\:.{2}$'
    # First pass -- capture stdout to check the reported file count.
    redirect_output = RedirectOutput()
    saveStdOut = sys.stdout
    sys.stdout = redirect_output
    scan.scan("test/data", "test/tmp/config.json")
    sys.stdout = saveStdOut
    self.assertEqual('11\n', redirect_output.output)
    # Get scan code for test with second pass
    cursor.execute(
        "SELECT value FROM settings WHERE name='last_scan_code'")
    result = cursor.fetchone()
    firstScanCode = result[0]
    # Number of scanned files
    cursor.execute("SELECT * FROM audio_file")
    self.assertEqual(11, cursor.rowcount)
    # Scan start time
    cursor.execute(
        "SELECT value FROM settings WHERE name='last_scan_start_time'")
    result = cursor.fetchone()
    self.assertRegex(result[0], start_time_regex)
    # Scan state (Finish)
    cursor.execute(
        "SELECT value FROM settings WHERE name='last_scan_state'")
    result = cursor.fetchone()
    self.assertEqual(1, int(result[0]))
    # Second pass -- rescanning must not duplicate files.
    redirect_output = RedirectOutput()
    saveStdOut = sys.stdout
    sys.stdout = redirect_output
    scan.scan("test/data", "test/tmp/config.json")
    sys.stdout = saveStdOut
    self.assertEqual('11\n', redirect_output.output)
    # Same number of files
    cursor.execute("SELECT * FROM audio_file")
    self.assertEqual(11, cursor.rowcount)
    # Test if scan code is different
    cursor.execute(
        "SELECT value FROM settings WHERE name='last_scan_code'")
    result = cursor.fetchone()
    self.assertNotEqual(firstScanCode, result[0])
    cursor.close()
def test_success(self, mock_DatabaseManager, mock_format_comment,
                 mock_HandlerManager, mock_qualify):
    """A qualifying article is replied to and recorded as a success."""
    mock_qualify.return_value = True
    mock_HandlerManager.get_handler.return_value = mock_handler
    mock_format_comment.return_value = FORMATTED_COMMENT

    scan(mock_subreddit)

    test_qualify.mock_submission.reply.assert_called_with(FORMATTED_COMMENT)
    mock_DatabaseManager.write_id.assert_called_with(
        test_qualify.MOCK_SUBMISSION_ID, DatabaseActionEnum.SUCCESS)
def main():
    # Build a decision tree over the 500 most frequent positive and negative
    # words of finemedium.txt and print its accuracy on the held-out 20%.
    binary_label = True
    exclude_stopwords = True
    data_nosw, data_positive_nosw, data_negative_nosw = (scan.scan('finemedium.txt', exclude_stopwords, binary_label))
    # Reformat each (review, label) pair into [word, word, ..., label].
    data = []
    for datum in data_nosw:
        new_datum = datum[0].split()
        new_datum.append(datum[1])
        data.append(new_datum)
    #print data
    # 500 most frequent positive words; index 0 of the sorted list is skipped.
    positive_review_nosw = ' '.join([row[0] for row in data_positive_nosw])
    dict_positive_nosw = utils.get_unigram(positive_review_nosw)[0]
    positive_words = [x[0] for x in sorted(dict_positive_nosw.items(), key=operator.itemgetter(1), reverse = True)[1:501]]
    # 500 most frequent negative words, same skip.
    negative_review_nosw = ' '.join([row[0] for row in data_negative_nosw])
    dict_negative_nosw = utils.get_unigram(negative_review_nosw)[0]
    negative_words = [x[0] for x in sorted(dict_negative_nosw.items(), key=operator.itemgetter(1), reverse = True)[1:501]]
    # Order-preserving union of the two word lists.
    all_words = positive_words
    all_words.extend(x for x in negative_words if x not in positive_words)
    # 80/20 train/test split.
    length = len(data)
    train_data = data[:int(length*.8)]
    test_data = data[int(length*.8):]
    decision_tree = dt.train(train_data, all_words)
    test_results = dt.test(decision_tree, test_data)
    print test_results
def main():
    # Same pipeline as the finemedium variant, but over the full
    # finefoods.txt corpus: train a decision tree on the 1000 most frequent
    # positive/negative words and print test-set accuracy.
    binary_label = True
    exclude_stopwords = True
    data_nosw, data_positive_nosw, data_negative_nosw = (scan.scan('finefoods.txt', exclude_stopwords, binary_label))
    # Reformat each (review, label) pair into [word, word, ..., label].
    data = []
    for datum in data_nosw:
        new_datum = datum[0].split()
        new_datum.append(datum[1])
        data.append(new_datum)
    # 500 most frequent positive words (sorted-list index 0 skipped).
    positive_review_nosw = ' '.join([row[0] for row in data_positive_nosw])
    dict_positive_nosw = utils.get_unigram(positive_review_nosw)[0]
    positive_words = [x[0] for x in sorted(dict_positive_nosw.items(), key=operator.itemgetter(1), reverse = True)[1:501]]
    # 500 most frequent negative words (same skip).
    negative_review_nosw = ' '.join([row[0] for row in data_negative_nosw])
    dict_negative_nosw = utils.get_unigram(negative_review_nosw)[0]
    negative_words = [x[0] for x in sorted(dict_negative_nosw.items(), key=operator.itemgetter(1), reverse = True)[1:501]]
    # Order-preserving union of the two word lists.
    all_words = positive_words
    all_words.extend(x for x in negative_words if x not in positive_words)
    # 80/20 train/test split.
    length = len(data)
    train_data = data[:int(length*.8)]
    test_data = data[int(length*.8):]
    decision_tree = dt.train(train_data, all_words)
    test_results = dt.test(decision_tree, test_data)
    print test_results
def select_interface(event="x"):
    # Combobox callback: scan the selected interface's network and populate
    # the router/target widgets with the hosts found.
    s = interface_combo.get()
    # The combobox value is "network, interface".
    s = s.split(', ')
    # Grab the IP's and MAC addresses
    found_ips = scan.scan(net=s[0], interface=s[1])
    # Each result is reversed and joined so it reads "MAC, IP" (or vice
    # versa) in the widgets.
    found_ips = [', '.join(i[::-1]) for i in found_ips]
    interface = s[1]
    # Set the found IP's, MAC addresses and make buttons available
    router_combo['values'] = found_ips
    router_combo.current(0)
    router_combo['state'] = "normal"
    targets_combo['state'] = "normal"
    targets_combo.delete(0, END)
    for ip in found_ips:
        targets_combo.insert(END, ip)
    attacker_entry['state'] = "normal"
    packets_entry['state'] = "normal"
    packets_entry.delete(0, END)
    packets_entry.insert(0, '100')
    silent_button['state'] = "normal"
    attack_button['state'] = "normal"
    restore_button['state'] = "normal"
def start():
    """Log in to Reddit and scan the configured subreddit; always disconnect."""
    print("Starting main")
    client = praw.Reddit(
        client_id=config.CLIENT["ID"],
        client_secret=config.CLIENT["SECRET"],
        user_agent=config.USER_AGENT,
        username=config.USERNAME,
        password=config.PASSWORD,
    )
    try:
        scan(client.subreddit(config.SUBREDDIT))
    except Exception:
        # Top-level boundary: report the failure but still disconnect below.
        print("Exception occurred while scanning")
        traceback.print_exc()
    DatabaseManager.disconnect()
def main(): from config import FINEFOODS ## Scan in data print "Scanning in data" binary_label = True data = scan.scan(FINEFOODS, binary_label=binary_label) data = [(tuple(review.split()), score) for review, score in data] length = len(data) ## Seperate into training and test data print "Seperating into training and test data" train_data = data[:int(length*.8)] test_data = data[int(length*.8):] ## Prepare training unigrams PN = P PN.extend([elem for elem in N if elem not in P]) ## Train print "Making decision tree" decision_tree = dt.train(PN, train_data) ## Test print "Calculating test results" test_results = dt.test(decision_tree, test_data) print "Test results: {}%".format(test_results)
def test_001():
    """A single recurrent state with no sequences gains +1 per step."""
    x0 = theano.tensor.fvector('x0')
    padded = theano.tensor.shape_padleft(x0)
    state = theano.tensor.unbroadcast(padded, 0)
    step = lambda value: value + numpy.float32(1)
    out, _ = scan.scan(step, states=state, n_steps=5)
    fn = theano.function([x0], out[0])
    initial = numpy.float32([1, 2, 3])
    assert numpy.all(fn(initial) == initial + 5)
def runGrammar(self, message):
    # Tokenise and parse *message*, dump its AST, then execute it; grammar
    # errors are reported rather than propagated.
    try:
        ast = parse(parser, scan(message))
        printAST(ast)
        execute.execute(ast, True)
    except GrammaticalError as e:
        print "Error:", e
def test_skip_2(self, mock_format_comment, mock_HandlerManager, mock_qualify):
    """
    It should not post a reply if the article qualifies but is too
    lengthy.
    """
    mock_qualify.return_value = True
    mock_HandlerManager.get_handler.return_value = MOCK_HANDLER
    mock_format_comment.return_value = FORMATTED_COMMENT
    original_comment_length_limit = config.COMMENT_LENGTH_LIMIT
    config.COMMENT_LENGTH_LIMIT = 1
    try:
        scan(MOCK_SUBREDDIT)
    finally:
        # BUG FIX: restore the global limit even when scan() raises, so a
        # failure here cannot poison subsequent tests.
        config.COMMENT_LENGTH_LIMIT = original_comment_length_limit
    test_qualify.MOCK_SUBMISSION.reply.assert_not_called()
def generate_images(image, name):
    """Sweep erode/dilate/resize/threshold parameters over *image* and return
    the list of binarized variants followed by their scanned variants."""
    # lists to iterate over :)
    kernel_list = range(3, 4, 2)
    iterations = range(1, 3, 1)
    width_list = range(400, 600, 100)
    blocksize_list = range(51, 202, 100)
    constant_list = range(1, 18, 9)
    thresh_type = ['mean']
    # initialize variables
    original_name = name + '_generated_images'
    property_dict = {'erode_kernel': 1, 'erode_iter': 2, 'dilate_kernel': 3,
                     'dilate_iter': 4, 'width': 5, 'blocksize': 6,
                     'constant': 7, 'thresh_type': 'temp'}
    images_generated_b = []
    images_generated_s = []
    # start the iterations and assign new image class
    image = cv2.imread(image)
    for kernel in kernel_list:
        property_dict['erode_kernel'] = kernel
        for iteration in iterations:
            try:
                property_dict['erode_iter'] = iteration
                eroded = erode(kernel, iteration, image)
                for kernel_dilate in kernel_list:
                    property_dict['dilate_kernel'] = kernel_dilate
                    for iteration_dilate in iterations:
                        property_dict['dilate_iter'] = iteration_dilate
                        dilated = dilate(kernel_dilate, iteration_dilate, eroded)
                        for width in width_list:
                            property_dict['width'] = width
                            image_resized = image_resize(dilated, width=width)
                            for blocksize in blocksize_list:
                                property_dict['blocksize'] = blocksize
                                for constant in constant_list:
                                    property_dict['constant'] = constant
                                    for thresh in thresh_type:
                                        property_dict['thresh_type'] = thresh
                                        binarized = thresholding(image_resized, blocksize, thresh, constant)
                                        image_b = assign_image(binarized, property_dict)
                                        print('assigning image')
                                        image_s = None
                                        try:
                                            scannedname, scanned_image = scan(binarized)
                                            print('assigning image')
                                            image_s = assign_image(scanned_image, property_dict)
                                        except Exception:
                                            # BUG FIX: the original evaluated this
                                            # string without printing it (a no-op).
                                            print('Could not find the contours...')
                                        images_generated_b.append(image_b)
                                        # BUG FIX: on scan failure the original
                                        # appended a stale image_s from a previous
                                        # iteration (or raised NameError on the
                                        # first); skip it instead.
                                        if image_s is not None:
                                            images_generated_s.append(image_s)
            except Exception:
                print('image could not be eroded')
    images = images_generated_b + images_generated_s
    return images
def test_error(self, mock_DatabaseManager, mock_format_comment,
               mock_HandlerManager, mock_qualify):
    """
    A ValueError raised by the database module for an already-recorded
    submission ID is handled gracefully: the reply is still posted and the
    success write is still attempted.
    """
    mock_qualify.return_value = True
    mock_HandlerManager.get_handler.return_value = mock_handler
    mock_format_comment.return_value = FORMATTED_COMMENT
    mock_DatabaseManager.write_id.side_effect = ValueError()

    scan(mock_subreddit)

    test_qualify.mock_submission.reply.assert_called_with(FORMATTED_COMMENT)
    mock_DatabaseManager.write_id.assert_called_with(
        test_qualify.MOCK_SUBMISSION_ID, DatabaseActionEnum.SUCCESS)
def predict(captcha):
    """Return the list of characters recognised in the captcha image file."""
    segments = cut(regularized(Image.open(captcha)))
    pixel_rows = [scan(segment) for segment in segments]
    # Normalise pixel values to [0, 1] and shape them for the CNN.
    features = pd.DataFrame(pixel_rows) / 255.0
    features = features.values.reshape(-1, 21, 12, 1)
    model = models.load_model('captcha_model', compile=True)
    # Class index -> character: 0-9 map to '0'-'9', 10+ map to 'a'-'z'.
    to_char = lambda c: chr(c + 48 if 0 <= c <= 9 else c + 87)
    return [to_char(c) for c in model.predict_classes(features)]
def recognize(imgname='photos\\tough6.jpg', output='output.txt',
              desired='texts\\chom_tough.txt', show_intermediate_results=False):
    """Deskew, denoise, crop and OCR *imgname*; write the recognised text to
    *output* and print accuracy against *desired*."""
    scan(imgname, show_intermediate_results)
    img = cv2.imread('deskewed.jpg')
    # Slight dilation thickens strokes before OCR.
    img = cv2.dilate(img, np.ones((2, 2)))
    newimgname = 'no_noise.jpg'
    cv2.imwrite(newimgname, img)
    crop(newimgname, "scan_res.jpg", show_intermediate_results)
    a = pytesseract.image_to_string(Image.open('scan_res.jpg'), config="config")
    # IMPROVEMENT: a context manager closes the file even if print() raises;
    # the original open/flush/close leaked the handle on error.  This also
    # matches the sibling recognize() implementation in this file.
    with open(output, 'w+') as f:
        print(a, file=f)
    print('Accuracy: ' + str(test_accuracy(scan_res=output, desired=desired)))
def main():
    # Driver for the homework problems; which parts run is controlled by
    # the config.RUN_FILTER flags.
    start = time.time()
    # 'full' selects the complete input file over the small sample.
    if config.RUN_FILTER['full']:
        infile = config.INPUT_FILE
    else:
        infile = config.INPUT_FILE_SAMPLE
    data = None
    try:
        if config.RUN_FILTER['2a'] or config.RUN_FILTER['2b']:
            print 'Loading data from file'
            data = scan.scan(infile, exclude_stopwords=False, binary_label=True)
    except IOError as e:
        print 'ERROR: Cannot open file %s. Please provide a valid INPUT_FILE in config.py\n%s' % (infile, e)
        exit(1)
    if config.RUN_FILTER['2a']:
        problem2a(data, 10)
    if config.RUN_FILTER['2b']:
        problem2b(data)
    data_filtered, train_data, test_data = None, None, None
    try:
        if config.RUN_FILTER['2c'] or config.RUN_FILTER['2e'] or config.RUN_FILTER['2f']:
            # Problems 2c-2f use the stopword-filtered data.
            print 'Loading data from file (excluding stopwords)'
            data_filtered = scan.scan(infile, exclude_stopwords=True, binary_label=True)
            # 80/20 train/test split.
            length = len(data_filtered)
            train_data = data_filtered[:int(length*.8)]
            test_data = data_filtered[int(length*.8):]
    except IOError as e:
        print 'ERROR: Cannot open file %s. Please provide a valid INPUT_FILE in config.py\n%s' % (infile, e)
        exit(1)
    if config.RUN_FILTER['2c']:
        problem2c(data_filtered)
    if config.RUN_FILTER['2e'] or config.RUN_FILTER['2f']:
        # 2f evaluates the tree trained in 2e.
        d_tree = problem2e(train_data)
        print '\tFinished training decision tree'
        if config.RUN_FILTER['2f']:
            problem2f(test_data, d_tree)
    elapsed = time.time() - start
    logging.debug('Finished in %s seconds' % elapsed)
def scan_network(request, network_id):
    # Django view: parse the latest nmap report and serve it as a JS
    # variable assignment for the front end.  network_id is currently unused.
    #network = Network.objects.get(id=network_id)
    file_path = '%s/last.xml' % NMAP_RAPORT_PATH
    raport = scan(file_path)
    #connections = list(
    #Connection.objects.select_related("device_1", "device_2").all())
    data = json.dumps(raport)
    out = "var nmap_raport=%s;" % data
    # NOTE(review): `mimetype=` is the pre-Django-1.7 spelling of
    # `content_type=` -- confirm the Django version in use.
    return HttpResponse(out, mimetype='application/json')
def test_004():
    """scan over a sequence with no states maps +1 across the elements."""
    seq = theano.tensor.fvector('sq')
    steps = theano.tensor.iscalar('nst')
    increment = lambda s: s + numpy.float32(1)
    out, _ = scan.scan(increment, sequences=seq, states=[], n_steps=steps)
    fn = theano.function([seq, steps], out)
    values = numpy.float32([1, 2, 3, 4, 5])
    assert numpy.all(fn(values, 5) == values + 1)
def test_001():
    """Five scan steps over a single unbroadcast state add 5 in total."""
    x0 = theano.tensor.fvector('x0')
    first_row = theano.tensor.shape_padleft(x0)
    state = theano.tensor.unbroadcast(first_row, 0)
    out, _ = scan.scan(lambda current: current + numpy.float32(1),
                       states=state, n_steps=5)
    fn = theano.function([x0], out[0])
    start = numpy.float32([1, 2, 3])
    assert numpy.all(fn(start) == start + 5)
def main():
    """Load fineshort reviews, print word frequencies, split train/test."""
    binary_label = True
    exclude_stopwords = False
    data = scan.scan('fineshort.txt', exclude_stopwords, binary_label)
    length = len(data)
    # Word counts over the concatenation of every review text.
    all_review = ' '.join([row[0] for row in data])
    dict_all = utils.get_unigram(all_review)[0]
    print(sorted(dict_all.items(), key=operator.itemgetter(1)))
    # 80/20 split for later training/evaluation.
    split = int(length * .8)
    train_data = data[:split]
    test_data = data[split:]
def GET(self):
    """web.py handler: return the spectrum between query params s and e as JSON."""
    # IDIOM FIX: compare with None using `is`, not `==` (PEP 8; `==` can be
    # hijacked by a custom __eq__).
    if _g.scan is None:
        # First request: create the scanner lazily.
        _g.scan = scanlib.scan(_g)
    else:
        # Subsequent requests reuse the scanner after re-initialising it.
        _g.scan.init()
    i = web.input()
    startf = float(i['s'])
    endf = float(i['e'])
    ret = _g.scan.spectrum(startf, endf)
    web.header('Content-Type', 'text/json')
    return json.dumps({"ret": "ok", "data": ret})
def main():
    # Train a decision tree on 80% of the reviews in foods.txt.
    binary_label = True
    data = scan.scan('foods.txt', binary_label)
    length = len(data)
    train_data = data[:int(length*.8)]
    test_data = data[int(length*.8):]
    decision_tree = dt.train(train_data)
    # NOTE(review): this evaluates on train_data -- test_data is never used
    # -- and the result is discarded; confirm whether test_data was intended.
    test_results = dt.test(decision_tree, train_data)
def scan_for_ports(self):
    """
    Scans the system for open ports

    @rtype: array
    @return: an array of available ports
    """
    ports = scan()
    # Skip the first scan entry (as the original index-1 loop did) and
    # offset each remaining port number by one.
    return [entry[0] + 1 for entry in ports[1:]]
def test_002():
    """A 6-row state buffer keeps the initial row and accumulates +1 per step."""
    x0 = theano.tensor.fvector('x0')
    zeros = theano.tensor.alloc(
        theano.tensor.constant(numpy.float32(0)), 6, x0.shape[0])
    state = theano.tensor.set_subtensor(zeros[0], x0)
    out, _ = scan.scan(lambda value: value + numpy.float32(1),
                       states=state, n_steps=5)
    fn = theano.function([x0], out)
    start = numpy.float32([1, 2, 3])
    result = fn(start)
    assert numpy.all(result[-1] == start + 5)
    assert numpy.all(result[0] == start)
def main():
    # Train a decision tree on finefoods reviews (binary labels, stopwords
    # removed) and print its accuracy on the held-out 20%.
    binary_label = True
    exclude_stopwords = True
    data = scan.scan('finefoods.txt', exclude_stopwords, binary_label)
    length = len(data)
    train_data = data[:int(length*.8)]
    test_data = data[int(length*.8):]
    decision_tree = dt.train(train_data)
    #dt.check_tree(decision_tree)
    test_results = dt.test(decision_tree, test_data)
    print 'Prediction Accuracy:'
    print test_results
def findComPorts(self, serial_port):
    """Return the list of serial port names for the current platform.

    serial_port is accepted for interface compatibility but not used.
    """
    system = platform.system()
    if system == 'Windows':
        # The Windows scanner yields tuples; keep only the name (index 1).
        ports = [entry[1] for entry in scan.scan()]
    else:
        # Linux and Mac share the same scanner.
        ports = scanlinux.scan()
    #print ports
    return ports
def test_003():
    """Sequence elements are accumulated into the state row by row."""
    x0 = theano.tensor.fvector('x0')
    sq = theano.tensor.fvector('sq')
    zero_buffer = theano.tensor.alloc(
        theano.tensor.constant(numpy.float32(0)), 6, x0.shape[0])
    state = theano.tensor.set_subtensor(zero_buffer[0], x0)
    out, _ = scan.scan(lambda s, x: x + s,
                       sequences=sq, states=state, n_steps=5)
    fn = theano.function([sq, x0], out)
    initial = numpy.float32([1, 2, 3])
    seq_vals = numpy.float32([1, 2, 3, 4, 5])
    result = fn(seq_vals, initial)
    # 1+2+3+4+5 == 15 accumulated on top of the initial row.
    assert numpy.all(result[-1] == initial + 15)
    assert numpy.all(result[0] == initial)
def run(self): while True: try: task_host = self.queue.get(block=False) except: break try: if self.mode: port_list = AC_PORT_LIST[task_host] else: port_list = self.config_ini['Port_list'].split('|')[1].split('\n') _s = scan.scan(task_host, port_list) _s.config_ini = self.config_ini # 提供配置信息 _s.statistics = self.statistics # 提供统计信息 _s.run() except Exception, e: print e finally:
def reduce(fn, sequences, outputs_info, non_sequences=None, go_backwards=False,
           mode=None, name=None):
    """
    Similar behaviour as python's reduce

    :param fn: The function that ``reduce`` applies at each iteration step
        (see ``scan`` for more info).

    :param sequences: List of sequences over which ``reduce`` iterates
        (see ``scan`` for more info)

    :param outputs_info: List of dictionaries describing the outputs of
        reduce (see ``scan`` for more info).

    :param non_sequences: List of arguments passed to ``fn``. ``reduce`` will
        not iterate over these arguments (see ``scan`` for more info).

    :param go_backwards: Boolean value that decides the direction of
        iteration. True means that sequences are parsed from the end towards
        the begining, while False is the other way around.

    :param mode: See ``scan``.

    :param name: See ``scan``.

    :return: Tuple of (the final step's output(s), the scan updates) --
        only the last iteration's value is kept, matching reduce semantics.
    """
    rval = scan.scan(fn=fn,
                     sequences=sequences,
                     outputs_info=outputs_info,
                     non_sequences=non_sequences,
                     go_backwards=go_backwards,
                     truncate_gradient=-1,
                     mode=mode,
                     name=name)
    # Only the final step matters for a reduce, so keep index -1 of each
    # output (element-wise when scan returned several outputs).
    if isinstance(rval[0], (list, tuple)):
        return [x[-1] for x in rval[0]], rval[1]
    else:
        return rval[0][-1], rval[1]
def main():
    # Train two decision-tree variants (dictionary-based and binary-tree
    # based) on the same finefoods split and print both accuracies.
    # default is binary outcome and no stopwords
    binary_label = True
    exclude_stopwords = True
    data_nosw, data_positive_nosw, data_negative_nosw = (scan.scan('finefoods.txt', exclude_stopwords, binary_label))
    # format data into 2 dimensional array
    data = []
    for datum in data_nosw:
        # first part of array is each review with words splitted
        new_datum = datum[0].split()
        # last item in each array is the label
        new_datum.append(datum[1])
        data.append(new_datum)
    # get a list of 500 most frequent positive words, ignoring the <br>
    positive_review_nosw = ' '.join([row[0] for row in data_positive_nosw])
    dict_positive_nosw = utils.get_unigram(positive_review_nosw)[0]
    positive_words = [x[0] for x in sorted(dict_positive_nosw.items(), key=operator.itemgetter(1), reverse = True)[1:501]]
    # get a list of 500 most frequent negative words, ignoring the <br>
    negative_review_nosw = ' '.join([row[0] for row in data_negative_nosw])
    dict_negative_nosw = utils.get_unigram(negative_review_nosw)[0]
    negative_words = [x[0] for x in sorted(dict_negative_nosw.items(), key=operator.itemgetter(1), reverse = True)[1:501]]
    # create non duplicate list of all frequent words from the two lists
    all_words = positive_words
    all_words.extend(x for x in negative_words if x not in positive_words)
    # split training and testing data
    length = len(data)
    train_data = data[:int(length*.8)]
    test_data = data[int(length*.8):]
    # using a dicision tree utilizing dictionaries
    decision_tree_dict = dtd.train(train_data, all_words)
    test_results_dict = dtd.test(decision_tree_dict, test_data)
    print test_results_dict
    # the same disicion tree utilizing binary tree
    decision_tree = dt.train(train_data, all_words)
    test_results = dt.test(decision_tree, test_data)
    print test_results
def map(fn, sequences, non_sequences=None, truncate_gradient=-1,
        go_backwards=False, mode=None, name=None):
    """Mirror Python's built-in ``map`` on top of ``scan``.

    :param fn: Function that ``map`` applies at each iteration step
        (see ``scan`` for more info).
    :param sequences: Sequences over which ``map`` iterates
        (see ``scan`` for more info).
    :param non_sequences: Arguments passed to ``fn`` that ``map`` does not
        iterate over (see ``scan`` for more info).
    :param truncate_gradient: See ``scan``.
    :param go_backwards: When True, sequences are parsed from the end
        towards the beginning; when False, front to back.
    :param mode: See ``scan``.
    :param name: See ``scan``.
    """
    # ``map`` carries no state from one step to the next, hence the empty
    # outputs_info.
    kwargs = dict(fn=fn,
                  sequences=sequences,
                  outputs_info=[],
                  non_sequences=non_sequences,
                  truncate_gradient=truncate_gradient,
                  go_backwards=go_backwards,
                  mode=mode,
                  name=name)
    return scan.scan(**kwargs)
def test_9():
    # scan over a strictly decreasing all-negative sequence.
    outcome = scan([-1, -2, -3])
    assert outcome == -3
def test_10():
    # scan over a strictly increasing all-negative sequence.
    outcome = scan([-3, -2, -1])
    assert outcome == -6
def test_scan_consecutive():
    # scan over consecutive increasing positives.
    outcome = scan([1, 2, 3])
    assert outcome == 6
def test_1():
    # scan over a sequence containing a repeated value.
    outcome = scan([2, 2, 3])
    assert outcome == 5
from parse import parse
from execute import execute
from errors import GrammaticalError
from ast import printAST

if __name__ == '__main__':
    # Read-parse-execute loop: each input line is tokenized, parsed into an
    # AST (which is printed), then executed.  Input comes from the file
    # named on the command line, or from stdin when no argument is given.
    import sys
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        f = open(filename)
    else:
        f = sys.stdin
    while True:
        line = f.readline()
        # readline() returns '' only at EOF; stop the loop there.
        if line == '':
            break
        # Skip blank lines without echoing them.
        if line == '\n':
            continue
        # Echo the line being processed.  The trailing comma suppresses the
        # extra newline, since `line` still carries its own.
        print ">", line,
        try:
            # NOTE(review): `scan` is presumably the tokenizer defined
            # elsewhere in this module -- confirm it is in scope here.
            ast = parse(scan(line))
            printAST(ast)
            # The second argument flags interactive (stdin) execution.
            execute(ast, f == sys.stdin)
        except GrammaticalError as e:
            # Report parse errors but keep processing subsequent lines.
            print "Error:", e
    # Close only handles we opened ourselves; never close stdin.
    if f != sys.stdin:
        f.close()
    print 'ok'
# Validate the target list before touching the network.  `args`, `parser`,
# `scan_type`, `targets`, and the EXIT_* constants are defined earlier in
# this scope.
if not args:
    print("TARGET is missing.")
    parser.print_help()
    sys.exit(EXIT_FAILURE)
else:
    # Every target must be a valid IP address; abort on the first bad one.
    for target in args:
        if not valid_ip_address(target):
            print("TARGET \"%s\" is not valid." % target)
            parser.print_help()
            sys.exit(EXIT_FAILURE)
        else:
            targets.append(target)

# Do the scan.  Dispatch on the requested scan type; anything other than
# 'ping' or 'sweep' falls through to the generic scanner.
import scan
if scan_type == 'ping':
    s = scan.ping(scan_type, targets)
elif scan_type == 'sweep':
    s = scan.sweep(scan_type, targets)
else:
    s = scan.scan(scan_type, targets)

# All scanner variants share the same ICMP send/receive/report interface.
s.send_icmp_packet()
s.recv_icmp_packet()
s.show_results()

sys.exit(EXIT_SUCCESS)

##############################################################################
import sys from subprocess import Popen, PIPE def in_use (device): if 'windows' in sys.platform: # TODO: use Handle # http://stackoverflow.com/questions/18059798/windows-batch-equivalent-of-fuser-k-folder # https://technet.microsoft.com/en-us/sysinternals/bb896655 return False pipe = Popen(['fuser', device], stdout=PIPE, stderr=PIPE) stdout, stderr = pipe.communicate( ) return stdout is not "" if __name__ == '__main__': from scan import scan candidate = (sys.argv[1:2] or [scan( )]).pop( ) print in_use(candidate)
# Tried to check in but checkins are disabled print _('Smolt set to checkin but checkins are disabled (hint: service smolt start)') sys.exit(6) # read the profile profile = smolt.get_profile() if opts.new_pub: pub_uuid = profile.regenerate_pub_uuid(user_agent=opts.user_agent, smoonURL=opts.smoonURL, timeout=opts.timeout) print _('Success! Your new public UUID is: %s' % pub_uuid) sys.exit(0) if opts.scanOnly: scan(profile, opts.smoonURL) rating(profile, opts.smoonURL) sys.exit(0) for line in profile.getProfile(): try: print line except UnicodeEncodeError: pass if not opts.autoSend: if opts.printOnly: sys.exit(0) else: try: send = raw_input('\n' +