def test_loadFile_wrong_file(self):
    f1 = FileIO("tests/test1.csv", "r")
    loaded1 = f1.loadFile()
    f1.close()
    f2 = FileIO("tests/test2.csv", "r")
    loaded2 = f2.loadFile()
    f2.close()
    self.assertNotEqual(loaded1, loaded2)
def test_readlines(self):
    filename = "kasta.env"
    assert not path.exists(filename)
    contents = ["one\n", "two three\n"]
    FileIO().writelines(filename, contents)
    assert FileIO().readlines(filename) == contents
    remove(filename)
    assert not path.exists(filename)
def test_writelines(self):
    filename = "kasta.env"
    assert not path.exists(filename)
    lines = ["first\n", "Second\n"]
    FileIO().writelines(filename, lines)
    assert path.exists(filename)
    assert FileIO().readlines(filename) == lines
    remove(filename)
    assert not path.exists(filename)
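# A minimal FileIO sketch (an assumption, not the project's actual class)
# that would satisfy test_readlines and test_writelines above: thin wrappers
# around the built-in open().
class FileIO:
    def writelines(self, filename, lines):
        # Write the lines verbatim; callers supply their own newlines.
        with open(filename, "w") as f:
            f.writelines(lines)

    def readlines(self, filename):
        # Return the lines with newlines kept, mirroring writelines().
        with open(filename) as f:
            return f.readlines()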
def test_close_file(self):
    if os.path.exists("foo"):
        os.remove("foo")
    f = FileIO("foo", "w")
    with self.assertRaises(Exception):
        f.close()
def main():
    """Get the configuration, get the driver, then build the handler and start it."""
    args = get_args()
    port = args.port[0]
    baud = args.baud[0]
    use_ftdi = args.ftdi
    # Driver with context
    with serial_link.get_driver(use_ftdi, port, baud) as driver:
        # Handler with context
        with Handler(Framer(driver.read, driver.write)) as link:
            link.add_callback(serial_link.log_printer, SBP_MSG_LOG)
            link.add_callback(serial_link.printer, SBP_MSG_PRINT_DEP)
            data = open(args.file, 'rb').read()

            def progress_cb(size):
                sys.stdout.write("\rProgress: %d%% \r" % (100 * size / len(data)))
                sys.stdout.flush()

            print('Transferring image file...')
            FileIO(link).write("upgrade.image_set.bin", data,
                               progress_cb=progress_cb)
            print('Committing file to flash...')
            code = shell_command(link, "upgrade_tool upgrade.image_set.bin", 300)
            if code != 0:
                print('Failed to perform upgrade (code = %d)' % code)
                return
            print('Resetting Piksi...')
            link(MsgReset(flags=0))
def fileOpen(self) -> None:
    if not self._menuFile.isEnabled():
        return
    result: Tuple[str, str] = QFileDialog.getOpenFileName(
        self, "Select file", "", "JSON Files (*.json);;All Files (*.*)")
    if len(result) != 2 or len(result[0]) == 0:
        return
    fn = result[0]
    print("FILE READ \"{}\"".format(fn))
    self.polygonDataHelper.clearAll()
    hdl = FileIO(self.polygonFactory)
    _, errs = hdl.readFile(fn)
    errs += self.polygonDataHelper.updateAllPolygonCache()
    self.polygonDataHelper.generateMapping()
    self._polygonList.polygonsChange()
    self._rasterSurface.repaint()
    if len(errs) > 0:
        ErrorListDrawer(
            "There were some errors while importing the JSON file:",
            errs, self).show()
def OnFdTextChanged(root, pgbar, pglabel, btn, entry, ft_label):
    fd_name = entry.get()
    file_io = FileIO()
    ft_existed = False
    if file_io.check_obj("save"):
        pic_dataset_paths = file_io.load_obj("save")
        for path in pic_dataset_paths:
            if path == fd_name:
                ft_existed = True
                break
    if ft_existed:
        # Features already exist for this folder: show 100% and the file path.
        pgbar["value"] = 100
        pglabel.configure(text="100.00%")
        btn["text"] = "重新提取特征"  # "Re-extract features"
        # Resolve the application directory for both frozen (e.g. PyInstaller)
        # builds and plain script runs.
        if getattr(sys, 'frozen', False):
            application_path = os.path.dirname(sys.executable)
        elif __file__:
            application_path = os.path.dirname(__file__)
        ft_name = "feature_" + hashlib.md5(fd_name.encode()).hexdigest() + ".h5"
        ft_label["text"] = os.path.abspath(os.path.join(application_path, ft_name))
    else:
        pgbar["value"] = 0
        pglabel.configure(text="0.00%")
        btn["text"] = "提取特征"  # "Extract features"
        ft_label["text"] = "当前文件夹没有特征被提取"  # "No features extracted for this folder"
    root.update()
def main(self, argv):
    try:
        opts, args = getopt(argv, "hti:u:",
                            ["help", "test", "input=", "user="])
    except GetoptError:
        self.usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            logger.info('Detected switch: {}'.format(opt))
            self.usage()
            sys.exit(2)
        elif opt in ('-i', '--input'):
            logger.info('Detected switch: {}'.format(opt))
            self.input_file = arg
        elif opt in ('-u', '--user'):
            logger.info('Detected switch: {}'.format(opt))
            self.made_by = arg
        elif opt in ('-t', '--test'):
            logger.info('Detected switch: {}'.format(opt))
            self.CONFIG = 'Test'
    self.show_params()
    file_io = FileIO()
    file_io.read(self.input_file, self.made_by, self.CONFIG)
def test___init___file_exists(self):
    if os.path.exists("foo"):
        os.remove("foo")
    f = open("foo", "w")
    f.close()
    with self.assertRaises(Exception):
        FileIO("foo", "w")
def ShowOriginalImage(event):
    # The widget name encodes the image index, e.g. ".frame.3" -> 3.
    img_no = int(str(event.widget).split(".")[-1])
    file_io = FileIO()
    imlist = file_io.load_obj("imlist")
    ipath = imlist[img_no]
    img = mpimg.imread(ipath)
    plt.imshow(img)
    plt.show()
def __init__(self, filename=None):
    self.__sheet = Sheet()
    self.__cursor = Cursor()
    self.__io = FileIO(filename)
    self.__programIsRunning = True
    self.__lastCommand = ""
    self.load()
def __init__(self):
    # Load configuration settings
    _config = FileIO().loadJSONFile("config.json")
    self.twData = _config["twitter_auth_data"]
    self.mongoData = _config["mongo_config"]
    # Parse command-line options
    self.parser = ArgumentParser(
        description="Grabs data from the Twitter API and stores it in a MongoDB database.")
    self.parser.add_argument('-n', '--name', type=str, nargs=1, required=True,
                             help='Unique name of the current search.')
    self.parser.add_argument('-t', '--terms', type=str, nargs='+', required=False,
                             help='Provide a list of search terms used to collect data from the API.')
    self.parser.add_argument('-f', '--file', type=str, nargs=1, required=False,
                             help='Provide a path to a JSON file of terms to collect data from the API.')
    self.parser.add_argument('-c', '--count', type=int, required=False,
                             help='Collect the specified number of tweets.')
    self.parser.add_argument('-l', '--listen', action='store_true', required=False,
                             help="Just open the stream and listen. Don't connect to the database.")
    self.args = self.parser.parse_args()
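# Hedged usage examples for the argument parser above. The script name
# grabber.py is hypothetical; only the flags come from the definitions above.
#
#   python grabber.py -n mysearch -t python mongodb -c 100
#   python grabber.py -n mysearch -f terms.json --listen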
def __init__(self, image_path, output_filename):
    self._point_coords = []
    self._point_count = 0
    self._line_slope = 0
    self._lock_gui = False
    self._output_filename = output_filename
    self._file = FileIO("./data/")
    self._original_img = cv2.imread(image_path, cv2.IMREAD_COLOR)
    self._new_img = self._original_img.copy()
    self.setup_image_gui(self._new_img)
def AnalyseInputImage(queryDir, maxNo, model_name, img_canvas, root):
    # Load the precomputed feature matrix and image names from the HDF5 file.
    h5f = h5py.File(model_name, 'r')
    feats = h5f['dataset_1'][:]
    imgNames_utf = h5f['dataset_2'][:]
    imgNames = [name.decode('utf-8') for name in imgNames_utf]
    h5f.close()

    # Extract the query image's feature vector and rank the database by
    # dot-product similarity (the stored features appear to be normalised;
    # see norm_feat in GenerateFeatureDatabase).
    model = VGGNet()
    queryVec = model.extract_feat(queryDir)
    scores = np.dot(queryVec, feats.T)
    rank_ID = np.argsort(scores)[::-1]
    rank_score = scores[rank_ID]
    maxres = int(maxNo)
    imlist = [imgNames[index] for i, index in enumerate(rank_ID[0:maxres])]
    file_io = FileIO()
    file_io.save_obj(imlist, "imlist")

    # Rebuild the scrollable result grid, three thumbnails per row.
    img_canvas.delete('all')
    vsbar = Scrollbar(frame_canvas, orient=VERTICAL, command=img_canvas.yview)
    vsbar.grid(row=0, column=1, sticky=NS)
    vsbar.config(command=img_canvas.yview)
    img_canvas.configure(yscrollcommand=vsbar.set)
    frame_images = Frame(img_canvas, bg="grey")
    img_canvas.create_window((0, 0), window=frame_images, anchor='nw')
    img_no = 0
    max_in_row = 0
    height_total = 0
    for i in imlist:
        basewidth = 300
        img = Image.open(i)
        wpercent = basewidth / float(img.size[0])
        hsize = int(float(img.size[1]) * wpercent)
        max_in_row = max(max_in_row, hsize)
        # Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
        img = img.resize((basewidth, hsize), Image.ANTIALIAS)
        render = ImageTk.PhotoImage(img)
        img_show = Label(frame_images, image=render, name=str(img_no))
        img_show.bind("<Button-1>", ShowOriginalImage)
        img_show.image = render  # keep a reference so Tk doesn't GC the image
        img_show.grid(row=img_no // 3, column=img_no % 3)
        img_no += 1
        if img_no % 3 == 0:
            height_total += max_in_row
            max_in_row = 0
    frame_canvas.config(height=height_total)
    root.update()
    img_canvas.config(scrollregion=img_canvas.bbox("all"))
def fileSaveAs(self) -> None:
    if not self._menuFile.isEnabled():
        return
    result: Tuple[str, str] = QFileDialog.getSaveFileName(
        self, "Select file", "", "JSON Files (*.json);;All Files (*.*)")
    if len(result) != 2 or len(result[0]) == 0:
        return
    fn = result[0]
    print("FILE WRITE \"{}\"".format(fn))
    hdl = FileIO(self.polygonFactory)
    _, err = hdl.writeFile(fn)
    if err is not None:
        ErrorListDrawer(
            "There were some errors while exporting the JSON file:",
            [err], self).show()
def startStreaming(self):
    if self.db and self.auth:
        terms = []
        if self.args.file:
            # Load a list of terms from a file
            data = FileIO().loadFile(self.args.file[0])
            terms = [term for term in data.strip().split('\n')]
        elif self.args.terms:
            # Read in a list of terms from the command line
            terms = self.args.terms
        self.db.tweets.insert(
            {"meta_data": {"terms": terms, "name": self.args.name[0]}})
        # Connect to the Twitter stream and collect some tweets!
        if terms:
            print("\n\nEnter [x] to quit the stream...")
            print("Connecting to stream...")
            listener = MongoStreamListener(self.db,
                                           listen=self.args.listen,
                                           limit=self.args.count,
                                           name=self.args.name[0])
            streamer = tweepy.Stream(auth=self.auth, listener=listener,
                                     timeout=60)
            print("Connected. Filtering tweets...\n")
            # Note: tweepy renamed the `async` keyword to `is_async` in 3.7;
            # on older (Python 2-era) versions use `async=True` instead.
            streamer.filter(None, terms, is_async=True)
            while True:
                opt = getpass('')
                if opt == 'x':
                    break
            print("Quitting...")
            streamer.disconnect()
    else:
        self.parser.print_usage()
def test_loadFile_good_file(self):
    f = FileIO("tests/test1.csv", "r")
    loaded = f.loadFile()
    f.close()
    reference = [
        Cluster('0', [Point([1, 1, 1]),
                      Point([2, 2, 2]),
                      Point([3.0, 3.0, 3.0])]),
        Cluster('1', [
            Point([1, 1, 1]),
            Point([1, 1, 1]),
            Point([1, 1, 1]),
            Point([1, 1, 1]),
            Point([1, 1, 1]),
            Point([1, 1, 1]),
        ]),
        Cluster('2', [Point([1, 1, 1])]),
    ]
    self.assertEqual(loaded, reference)
def GetFoldernameFromSelector(root, e):
    root.update()
    filename = askdirectory()
    if filename != '':
        file_io = FileIO()
        if file_io.check_obj("save"):
            pic_dataset_paths = file_io.load_obj("save")
            # Find the selected folder in the saved list, if present.
            ft_existed = 0
            for path in pic_dataset_paths:
                if path == filename:
                    break
                ft_existed += 1
            if ft_existed >= len(pic_dataset_paths):
                # Not seen before: append it.
                pic_dataset_paths.append(filename)
            else:
                # Already known: swap it to the end so it becomes the default.
                tmp = pic_dataset_paths[ft_existed]
                pic_dataset_paths[ft_existed] = pic_dataset_paths[-1]
                pic_dataset_paths[-1] = tmp
        else:
            pic_dataset_paths = [filename]
        e["values"] = pic_dataset_paths
        e.current(len(pic_dataset_paths) - 1)
    root.update()
def test___init___file_does_not_exist(self):
    with self.assertRaises(Exception):
        FileIO("foo", "r")
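# A minimal sketch (an assumption, not the project's actual implementation)
# of the constructor checks implied by test___init___file_exists and
# test___init___file_does_not_exist: "w" refuses to clobber an existing file,
# and "r" requires the file to exist. The close() behaviour exercised by
# test_close_file is project-specific and not modelled here.
import os

class FileIO:
    def __init__(self, filename, mode):
        if mode == "w" and os.path.exists(filename):
            raise FileExistsError(filename)
        if mode == "r" and not os.path.exists(filename):
            raise FileNotFoundError(filename)
        self._file = open(filename, mode)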
# Show feature path
Label(root, text="提取特征存储文件名称:").grid(row=1, column=0, sticky=W)  # "Feature file name:"
feature_text = "暂时没有特征被提取"  # "No features extracted yet"
label_feature_name = Label(root, text=feature_text)
label_feature_name.grid(row=1, column=1, sticky=W)

# Setup progress bar for feature extraction
pgbar = ttk.Progressbar(root, orient=HORIZONTAL, length=550, mode='determinate')
pgbar.grid(row=2, column=1, sticky=W)
l_pg = Label(root, text="0.00%")
l_pg.grid(row=2, column=1, sticky=E)

# Create database and extract features
Label(root, text="图片库路径:").grid(row=0, column=0, sticky=W)  # "Image library path:"
file_io = FileIO()
dbFolder_text = [""]
if file_io.check_obj('save'):
    dbFolder_text = file_io.load_obj('save')
e_dbFolder = ttk.Combobox(root, values=dbFolder_text, width=80)
e_dbFolder.grid(row=0, column=1, sticky=W)
Button(root, text="浏览文件夹",  # "Browse folders"
       command=lambda: GetFoldernameFromSelector(root, e_dbFolder)).grid(
           row=0, column=1, sticky=E)
ft_extract_btn = Button(root, text="提取特征",  # "Extract features"
                        command=lambda: GenerateFeatureDatabase(
                            e_dbFolder, label_feature_name, pgbar, l_pg, root))
ft_extract_btn.grid(row=1, column=1, sticky=E)
e_var = StringVar()
e_var.trace("w", lambda a, b, c: OnFdTextChanged(
    root, pgbar, l_pg, ft_extract_btn, e_dbFolder, label_feature_name))
e_dbFolder["textvar"] = e_var

# Setup the maximum number of similar images
Label(root, text="最大图片显示数量:").grid(row=4, column=0, sticky=W)  # "Max images to show:"
e_maxRsltNo = Entry(root, width=20)
import random
import sys

import pygame
import numpy

from snake import Snake
from foodspawner import FoodSpawner
from fileio import FileIO

baseFramerate = 500
size = 500
fIO = FileIO()


def gameover(quitGame=False, score=0, framerate=0):
    pygame.quit()
    fIO.printScore(score, framerate)
    if quitGame:
        sys.exit(0)
    init(baseFramerate)


def userInput(snake):
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            gameover(True)
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT:
                snake.changeDirTo("R")
            if event.key == pygame.K_LEFT:
                snake.changeDirTo("L")
def GenerateFeatureDatabase(fd_entry, ft_label, pgbar, pglabel, root):
    ft_name = "feature"
    fd_name = fd_entry.get()
    root.update()
    if not (os.path.exists(fd_name) and os.path.isdir(fd_name)):
        # "Folder read error" / "Cannot read the file, or the folder does not exist"
        messagebox.showerror(title="文件夹读取错误", message="无法读取文件或文件夹不存在")
        return
    file_handler = FileHandler()
    # img_list = file_handler.get_imlist(fd_name)
    img_list = [os.path.join(dp, f)
                for dp, dn, filenames in os.walk(fd_name)
                for f in filenames
                if os.path.splitext(f)[1] in ('.jpg', '.jpeg')]
    print("--------------------------------------------------")
    print("         feature extraction starts")
    print("--------------------------------------------------")
    feats = []
    names = []
    model = VGGNet()
    ite_no = 0
    ite_to = len(img_list)
    error_occurred = False
    # Resolve the application directory for both frozen (e.g. PyInstaller)
    # builds and plain script runs.
    if getattr(sys, 'frozen', False):
        application_path = os.path.dirname(sys.executable)
    elif __file__:
        application_path = os.path.dirname(__file__)
    for i, img_path in enumerate(img_list):
        try:
            ite_no += 1
            norm_feat = model.extract_feat(img_path)
            img_name = img_path
            feats.append(norm_feat)
            names.append(img_name.encode('utf-8'))
            print("extracting feature from image No. %d , %d images in total"
                  % ((i + 1), len(img_list)))
            pgbar['value'] = (100 * ite_no) / ite_to
            print(pgbar['value'])
            label_content = "{0:0.2f}%".format(pgbar['value'])
            pglabel.configure(text=label_content)
            root.update()
        except Exception as e:
            # Log the failure and skip the offending file.
            error_occurred = True
            logger_fname = os.path.join(application_path, 'error.log')
            now = datetime.now()
            dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
            with open(logger_fname, "a+", encoding='utf-8') as f:
                f.write('[Error] ' + dt_string + "\n")
                f.write('[Trace Back] ' + str(e) + "\n")
                f.write('[Detail] ' + img_path + "\n")
    feats = np.array(feats)
    print("--------------------------------------------------")
    print("      writing feature extraction results ...")
    print("--------------------------------------------------")
    if error_occurred:
        # "An error occurred during feature extraction; failed files were
        # skipped. Please send error.log and a console screenshot to the developer."
        messagebox.showerror(title="提取特征时发生错误",
                             message="提取特征时发生错误,程序跳过了错误文件完成特征提取,请将error.log文件,以及后台截图发给开发者。")
    output = ft_name + "_" + hashlib.md5(fd_name.encode()).hexdigest() + ".h5"
    output = os.path.join(application_path, output)
    h5f = h5py.File(output, 'w')
    h5f.create_dataset('dataset_1', data=feats)
    h5f.create_dataset('dataset_2', data=np.string_(names))
    h5f.close()
    ft_label.configure(text=os.path.abspath(output))
    # Load the saved path list and move the current folder to the end so it
    # becomes the default selection.
    file_io = FileIO()
    if file_io.check_obj("save"):
        pic_dataset_paths = file_io.load_obj("save")
        ft_existed = 0
        for path in pic_dataset_paths:
            if path == fd_name:
                break
            ft_existed += 1
        if ft_existed >= len(pic_dataset_paths):
            pic_dataset_paths.append(fd_name)
        else:
            tmp = pic_dataset_paths[ft_existed]
            pic_dataset_paths[ft_existed] = pic_dataset_paths[-1]
            pic_dataset_paths[-1] = tmp
        print(pic_dataset_paths)
    else:
        pic_dataset_paths = [fd_name]
    file_io.save_obj(pic_dataset_paths, "save")
    fd_entry["values"] = pic_dataset_paths
    fd_entry.current(len(pic_dataset_paths) - 1)
def __init__(self, fileIO=FileIO()):
    # Note: the default FileIO() is evaluated once, at definition time, so
    # every call that omits the argument shares the same instance.
    self.fileio: FileIO = fileIO
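# A hedged alternative sketch (illustration only, not the project's code):
# defaulting to None avoids sharing one FileIO instance across every caller
# that omits the argument.
def __init__(self, fileIO=None):
    self.fileio: FileIO = fileIO if fileIO is not None else FileIO()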
def reset_factory_defaults(self):
    # Delete settings file
    fio = FileIO(self.link)
    fio.remove('config')
    # Reset the Piksi
    self.link.send_message(ids.RESET, '')
from data import Data
from fileio import FileIO

gData = Data({
    'metricPreprocessor': 'euclidean',
    'groupGenerator': 'basic',
    'solutionGenerator': 'basic',
    'localImprover': 'basic'
})
fileIO = FileIO(
    gData, {
        'problem': 'input.txt',
        'data': 'data',
        'groups': 'groups',
        'solutions': 'solutions'
    }, gData.version())
from fileio import FileIO
from nltk import word_tokenize
from preprocess import Clean

io = FileIO()
clean = Clean()


# get reviews that failed, to judge the reasons
def make_label(stars):
    if int(float(stars)) > 3:
        return 2  # positive
    elif int(float(stars)) < 3:
        return 0  # negative
    else:
        return 1  # neutral


def write_tuple_data(data, path_write):
    corpus = []
    for label, revi in data:
        corpus.append(str(label) + "--" + revi)
    io.write_file_text('\n'.join(corpus), path_write)


def filterTestDataByDict(path_file, path_write_raw, path_write_clean,
                         path_dictionary):
    """
    :return: filter data by dictionary
    """
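# Quick sanity checks for make_label above (illustrative, not from the
# original source); note that int(float(...)) truncates toward zero.
assert make_label("4.0") == 2  # 4 > 3 -> positive
assert make_label("3") == 1    # exactly 3 -> neutral
assert make_label("2.5") == 0  # int(2.5) == 2 < 3 -> negative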