def __init__(self):
    """Set up the form UI, compute the retention-window timestamp and open
    the output writers and the MySQL connection."""
    super(Run_2_content_Form, self).__init__()
    self.setup_ui(self)
    self.retranslate_ui(self)
    # globe.time = "%d" % (time.time() * 1000)
    # Milliseconds in `globe.seven` days (Python 2 long literals).
    seven = 60 * 60 * 1000 * 24L * long(globe.seven)
    # Timestamp (ms) marking the start of the `seven`-day window before globe.time.
    globe.time_seven = long(globe.time) - seven
    # Writers for the update/delete output files, plus the DB connection.
    self.wr = writer.Writer(globe.update_path)
    self.wr_delete = writer.Writer(globe.delete_path)
    self.mysql_conn = mysql.MYSQL()
def run(self):
    """Convert the FRD results file into VTK/VTU output, one file per
    requested format and (when present) per time increment."""
    # Read FRD-file
    base_name = os.path.basename(self.file_name)
    logging.info('Reading ' + base_name)
    p = reader.FRD(self.file_name)
    l = len(p.times)  # number of time increments in the results

    # If file contains mesh data
    if p.node_block and p.elem_block:
        for fmt in self.fmt_list:
            if l:
                """ If model has many time steps - many output files
                will be created. Each output file's name should contain
                increment number padded with zero """
                print()
                counter = 1
                times_names = {}  # {increment time: file name, ...}
                for t in p.times:
                    if l > 1:
                        # Zero-pad the increment index to the width of the
                        # largest index so files sort lexicographically.
                        ext = '.{:0{width}}.{}'\
                            .format(counter, fmt, width=len(str(l)))
                        file_name = self.file_name.replace('.frd', ext)
                    else:
                        ext = '.{}'.format(fmt)
                        file_name = self.file_name.replace('.frd', ext)
                    times_names[t] = file_name
                    counter += 1

                # For each time increment generate separate .vt* file
                # Output file name will be the same as input
                for t, file_name in times_names.items():
                    base_name = os.path.basename(file_name)
                    logging.info('Writing ' + base_name)
                    w = writer.Writer(p, file_name, t)
                    if fmt == 'vtk':
                        w.write_vtk()
                    if fmt == 'vtu':
                        w.write_vtu()

                # Write ParaView Data (PVD) for series of VTU files
                if l > 1 and fmt == 'vtu':
                    writer.write_pvd(self.file_name\
                        .replace('.frd', '.pvd'), times_names)
            else:
                # No time data: single output file, extension swapped in place
                # (assumes a 3-character 'frd' extension).
                file_name = self.file_name[:-3] + fmt
                w = writer.Writer(p, file_name, None)
                if fmt == 'vtk':
                    w.write_vtk()
                if fmt == 'vtu':
                    w.write_vtu()
    else:
        logging.warning('File is empty!')
def main():
    """Copy TCP/UDP-over-IP packets from the input source to a DLT_RAW pcap
    stream on stdout, dropping undecodable or corrupt packets."""
    rd = reader.Pktsrc("-")
    wr = writer.Writer(sys.stdout, snaplen=65535, linktype=dpkt.pcap.DLT_RAW)
    # for stats
    stat_ok = 0
    stat_dropped = 0
    for pkt in rd:
        try:
            # decode and find IP layer
            ip = pkt.ip()
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Exception keeps the
            # drop-on-decode-failure behavior without masking signals.
            stat_dropped += 1
            continue
        # drop non-TCP/UDP
        if pkt.p() not in [dpkt.ip.IP_PROTO_TCP, dpkt.ip.IP_PROTO_UDP]:
            stat_dropped += 1
            continue
        # check IP checksum
        if not pkt.ip_ok(True):
            stat_dropped += 1
            continue
        stat_ok += 1
        wr.writepkt(ip, pkt.len, pkt.ts)
    wr.close()
def main(self): """総理大臣のCSVファイルをHTMLページへと変換するメインプログラム。""" # ホームディレクトリの直下のデスクトップのディレクトリに、 # SouriDaijinというディレクトリを作成する。 # すでに存在すれば、当該ディレクトリを消して、新たに作り、 # 存在しなければ、当該ディレクトリを作成する。 home_directory = os.environ['HOME'] base_directory = home_directory + '/Desktop/PrimeMinisters/' if os.path.isdir(base_directory): shutil.rmtree(base_directory) os.makedirs(base_directory) # ダウンローダに必要なファイル群をすべてダウンロードしてもらい、 # 入力となるテーブルを獲得する。 a_downloader = downloader.Downloader(base_directory) a_table = a_downloader.download_all() print a_table # トランスレータに入力となるテーブルを渡して変換してもらい、 # 出力となるテーブルを獲得する。 a_translator = translator.Translator(a_table) a_table = a_translator.table() print a_table # ライタに出力となるテーブルを渡して、 # Webページを作成してもらう。 a_writer = writer.Writer(base_directory, a_table) a_writer.write() return 0
def main():
    """CLI entry point: analyze tweet files and pickle per-user activity data."""
    cli = argparse.ArgumentParser(
        description=
        'Analyze pickle or json files of tweets and determine how often and when a user tweets.'
    )
    cli.add_argument("infile", help="The file or folder you wish to analyze")
    cli.add_argument("outfile", help="The destination pickle file")
    parsed = cli.parse_args()
    source, destination = parsed.infile, parsed.outfile

    analyzer = user_analyzer.User_Analyzer()
    # A directory is analyzed recursively; a single file directly.
    if os.path.isdir(source):
        data = analyzer.analyze_user_folder(source)
    elif os.path.isfile(source):
        data = analyzer.analyze_user_file(source)
    else:
        print(source + " is not a valid file.")
        return

    # Nothing analyzed: skip writing an empty pickle.
    if data == []:
        return
    pickle_writer = writer.Writer()
    pickle_writer.write_to_pickle(data, destination)
def checkStrength():
    """Show the current wlan0 Wi-Fi SSID and signal strength.

    Parses `iwconfig` output, scrolls the SSID across the display and
    draws a bar whose width (0..17) is proportional to link quality.
    Raises RuntimeError when the expected fields cannot be parsed
    (e.g. interface down or not associated).
    """
    results = str(subprocess.check_output(["/sbin/iwconfig", "wlan0"]))
    # FIX: raw strings — `\d` in a plain literal is an invalid escape
    # (DeprecationWarning today, an error in future Python versions).
    ssid_match = re.search(r'ESSID:"(.+)"', results)
    quality_match = re.search(r'Link Quality=(\d+)/(\d+)', results)
    if ssid_match is None or quality_match is None:
        # FIX: previously crashed with AttributeError on `.group` when the
        # fields were absent; fail with a clear message instead.
        raise RuntimeError('Could not parse iwconfig output for wlan0')
    ssid = ssid_match.group(1)
    # Scale the quality fraction to the 17-unit bar width.
    width = round((float(quality_match.group(1)) /
                   float(quality_match.group(2))) * 17)
    w = writer.Writer()
    w.scroll(ssid)
    draw(width)
def main():
    """Solve the selected dataset with MCTS and write the ride assignment."""
    datasets = ('a_example.in', 'b_should_be_easy.in', 'c_no_hurry.in',
                'd_metropolis.in', 'e_high_bonus.in')
    chosen = datasets[1]
    # Output name: dataset stem plus the algorithm tag.
    stem = chosen[:chosen.index('.')]
    out_filename = '../output/{}_MCTS.out'.format(stem)

    data_reader = reader.Reader('../dataset/' + chosen)
    solver = algorithm.MCTSAlgorithm(data_reader.get_rides(),
                                     data_reader.get_meta_info())
    solver.assign_rides()
    w = writer.Writer(solver.assigned_rides, out_filename)
def __init__(self):
    """Initialize the pages-crawler client: category map, writer and MongoDB."""
    # Mapping of category display names (Chinese) to internal category ids.
    self.catg = {u'综合新闻':6, u'娱乐':3, u'全球':1 ,u'商业&财经':5,
                 u'科技':4, u'生活':14, u'健康&美容':14, u'体育':2,
                 u'搞笑':11, u'食物':12, u'汽车':8, u'旅行':13, u'教育':9}
    base_client.BaseClient.__init__(self)
    self.task_type = "Pages"
    # Writer configured from the select-server config file.
    self.writer = writer.Writer()
    self.writer.init('../conf/selectsvr.conf')
    try:
        self.mongo = MongoClient(mongo_conf.host, mongo_conf.port)
        self.conn = self.mongo[mongo_conf.db]
    except Exception, e:
        logging.error("Client init error: %s" % e)
        # Connection failure is tolerated; callers must check for None.
        self.conn = None
def __init__(self):
    """Bring up the SSD1306 display, show the logo and prepare font writers."""
    #Display connected through SPI bus
    self.disp = ssd1306.SSD1306_SPI(128, 64, board.display_spi,
                                    dc = board.display_dc,
                                    res = board.display_res,
                                    cs = board.display_cs,
                                    external_vcc=False,
                                    mirror_v=True, mirror_h=True)
    # #variant: display connected through I2C bus
    # from machine import Pin, I2C
    # self.disp = ssd1306.SSD1306_I2C(128, 64, I2C(1))
    self.disp.contrast(100) #0~255 TODO: Test ouside in bright sunlight
    # Show the logo now; erase it 2 seconds from now (ticks_ms deadline).
    self.disp_image(images.logo)
    self.erase_time = time.ticks_ms() + 2000
    # Font writers for three text sizes. set_clip flags are presumably
    # (row_clip, col_clip, wrap) per the writer API — TODO confirm.
    self.font_big = writer.Writer(self.disp.framebuf, font_big)
    self.font_big.set_clip(False, True, False)
    self.font_med = writer.Writer(self.disp.framebuf, font_med)
    self.font_med.set_clip(False, True, True)
    self.font_small = writer.Writer(self.disp.framebuf, font_small)
    self.font_small.set_clip(False, True, True)
    # Slider debouncing state: last reported value and timestamp.
    self.slider_last_val = 0
    self.slider_last_time = 0
def __init__(self):
    """Initialize playback state, the song catalogue and the on-screen writer."""
    # Audio backend and playback flags.
    self.audio = utils.music
    self.ispaused = True
    # No current/previous song or style selected yet.
    self.cursong = self.curstyle = self.prevsong = None
    self.sorting = False
    # Timing state for the progress display.
    self._deltat = 0
    self.cursongtime = 0
    # Catalogue of songs grouped by style.
    self.songlist = songlist.SongList()
    self.styles = [style for style in self.songlist.styles.values()]
    # On-screen writer seeded with the help strings.
    self.writer = writer.Writer(self.helpstrings)
    self.writer_update()
def main(self): """総理大臣のCSVファイルをHTMLページへと変換するメインプログラム。""" # ホームディレクトリの直下のデスクトップのディレクトリに、 # PrimeMinistersというディレクトリを作成する。 # すでに存在すれば、当該ディレクトリを消して、新たに作り、 # 存在しなければ、当該ディレクトリを作成する。 #ここからデバックのためのコメントアウト多数につき注意 #print "[example]ホームディレクトリを指定:", home_directory = os.environ['HOME'] #print home_directory print "[example]ベースディレクトリを指定:", base_directory = home_directory + '/Desktop/PrimeMinisters/' print base_directory if os.path.isdir(base_directory): print "[example]ベースディレクトリからディレクトリを削除" shutil.rmtree(base_directory) print "[example]ベースディレクトリからディレクトリを作成" os.makedirs(base_directory) # ダウンローダに必要なファイル群をすべてダウンロードしてもらい、 # 入力となるテーブルを獲得する。 a_downloader = downloader.Downloader(base_directory) a_table = a_downloader.download_all() print a_table # トランスレータに入力となるテーブルを渡して変換してもらい、 # 出力となるテーブルを獲得する。 a_translator = translator.Translator(a_table) a_table = a_translator.table() print a_table # ライタに出力となるテーブルを渡して、 # Webページを作成してもらう。 a_writer = writer.Writer(base_directory, a_table) a_writer.write() return 0
def main(): '''entry point for command-line interface''' user_parms = get_user_parameters() spec_data_file_name_list = user_parms.infile if user_parms.scan_list != SCAN_LIST_ALL: user_parms.scan_list = parse_scan_list_spec(user_parms.scan_list) if not user_parms.hdf5_extension.startswith(os.extsep): user_parms.hdf5_extension = os.extsep + user_parms.hdf5_extension for spec_data_file_name in spec_data_file_name_list: if not os.path.exists(spec_data_file_name): msg = 'File not found: ' + spec_data_file_name print msg continue if user_parms.reporting_level in (REPORTING_STANDARD, REPORTING_VERBOSE): print 'reading SPEC data file: ' + spec_data_file_name spec_data = spec.SpecDataFile(spec_data_file_name) scan_list = pick_scans(spec_data.getScanNumbers(), user_parms.scan_list) if user_parms.reporting_level in (REPORTING_VERBOSE): print ' discovered', len(spec_data.scans.keys()), ' scans' print ' converting scan number(s): ' + ', '.join( map(str, scan_list)) basename = os.path.splitext(spec_data_file_name)[0] nexus_output_file_name = basename + user_parms.hdf5_extension if user_parms.force_write or not os.path.exists( nexus_output_file_name): out = writer.Writer(spec_data) out.save(nexus_output_file_name, scan_list) if user_parms.reporting_level in (REPORTING_STANDARD, REPORTING_VERBOSE): print 'wrote NeXus HDF5 file: ' + nexus_output_file_name
def main():
    """CLI entry point: parse tweet JSON files and dump them to a pickle."""
    cli = argparse.ArgumentParser(
        description=
        'Parse json files of tweets and export them to a pickle file.')
    cli.add_argument("infile", help="The file or folders you wish to parse")
    cli.add_argument("outfile", help="The destination pickle file")
    parsed = cli.parse_args()
    source, destination = parsed.infile, parsed.outfile

    json_parser = parser.Parser()
    # A directory is parsed recursively; a single file directly.
    if os.path.isdir(source):
        data = json_parser.parse_json_folder(source)
    elif os.path.isfile(source):
        data = json_parser.parse_json_file(source)
    else:
        print(source + " is not a valid file.")
        return

    pickle_writer = writer.Writer()
    pickle_writer.write_to_pickle(data, destination)
def main():
    """Count tweets per user, optionally seeding from an existing count pickle."""
    cli = argparse.ArgumentParser(
        description=
        'Count the number of tweets per user from the reduced tweet files.')
    cli.add_argument("infile", help="The file or folder you wish to count")
    cli.add_argument("outfile", help="The destination pickle file")
    cli.add_argument(
        "--count_file",
        help="A (optional) pickle file of counts that you wish to append to.",
        type=str,
        default="")
    parsed = cli.parse_args()
    source = parsed.infile
    destination = parsed.outfile
    count_file = parsed.count_file

    counter = tweet_counter.Tweet_Counter()
    # Seed the counts from a previous run when a count file was given.
    if os.path.isfile(count_file):
        with open(count_file, 'rb') as data_file:
            data = pickle.load(data_file)
    else:
        data = {}

    if os.path.isdir(source):
        data = counter.count_tweets_folder(source, data)
    elif os.path.isfile(source):
        data = counter.count_tweets_file(source, data)
    else:
        print(source + " is not a valid file or folder.")
        return

    # Nothing counted: skip writing an empty pickle.
    if data == {}:
        return
    pickle_writer = writer.Writer()
    pickle_writer.write_to_pickle(data, destination)
def display_temperature_and_humidity(temperature, humidity):
    """Render temperature and humidity side by side on the SSD1306 OLED,
    show them for 10 seconds, then power the display off.

    Raises RuntimeError if no device answers at I2C address 60 (0x3C,
    the usual SSD1306 address).
    """
    i2c = machine.I2C(scl=machine.Pin(config.DISPLAY_SCL_PIN),
                      sda=machine.Pin(config.DISPLAY_SDA_PIN))
    if 60 not in i2c.scan():
        raise RuntimeError('Cannot find display.')
    display = ssd1306.SSD1306_I2C(128, 64, i2c)
    font_writer = writer.Writer(display, freesans20)
    # Icon bitmaps: reading glyphs plus their unit marks.
    temperature_pbm = load_image('temperature.pbm')
    units_pbm = load_image('fahrenheit.pbm') if config.FAHRENHEIT \
        else load_image('celsius.pbm')
    humidity_pbm = load_image('humidity.pbm')
    percent_pbm = load_image('percent.pbm')
    # Frame: border plus a vertical divider splitting two 64-pixel panes.
    display.fill(0)
    display.rect(0, 0, 128, 64, 1)
    display.line(64, 0, 64, 64, 1)
    display.blit(temperature_pbm, 24, 4)
    display.blit(humidity_pbm, 88, 4)
    display.blit(units_pbm, 28, 52)
    display.blit(percent_pbm, 92, 52)
    # Temperature centered horizontally in the left pane.
    text = '{:.1f}'.format(temperature)
    textlen = font_writer.stringlen(text)
    font_writer.set_textpos((64 - textlen) // 2, 30)
    font_writer.printstring(text)
    # Humidity centered horizontally in the right pane.
    text = '{:.1f}'.format(humidity)
    textlen = font_writer.stringlen(text)
    font_writer.set_textpos(64 + (64 - textlen) // 2, 30)
    font_writer.printstring(text)
    display.show()
    time.sleep(10)
    display.poweroff()
# Using https://github.com/peterhinch/micropython-font-to-py:
# import the font and the writer code
import freesans20
import writer

# create a writer instance
font_writer = writer.Writer(display, freesans20)
# set writing position
font_writer.set_textpos(0, 0)
# write some text!
font_writer.printstring("hello")
# calculate how many pixels wide a text is
# (FIX: renamed from `len`, which shadowed the builtin len())
text_width = font_writer.stringlen("hello")
device, label_type[i], label_bucket, use_lm=use_lm, use_elmo=use_elmo)) data_test.append( io_utils.read_data_to_tensor(test_path[i], word_word2index, char_word2index, label_word2index_list[i], device, label_type[i], label_bucket, use_lm=use_lm, use_elmo=use_elmo)) writers.append(writer.Writer(label_word2index_list[i])) if use_elmo: word_table = (option_path, weight_path) else: word_table = io_utils.construct_word_embedding_table( embedd_dict, embedd_dim, word_word2index) logger.info("Constructing network...") network = base_model.BaseModel(embedd_dim, word_word2index.size(), char_dim, char_word2index.size(), num_labels[-1], num_filters, window, rnn_mode,
def __init__(self):
    """Attach the SSD1306 OLED over software I2C and prepare two font writers."""
    # Software I2C on pins 5 (SCL) and 4 (SDA).
    self.i2c = machine.I2C(-1, machine.Pin(5), machine.Pin(4))
    self.oled = ssd1306.SSD1306_I2C(128,64,self.i2c)
    # One writer per font size.
    self.inc14 = writer.Writer(self.oled, inconsolata14)
    self.free20 = writer.Writer(self.oled, freesans20)
    # Clip text that overruns the display rather than wrapping/scrolling.
    self.inc14.set_clip(True,True)
from gmusicapi import Mobileclient # Imports are stupid. mc = Mobileclient() # noqa Our interface to Google Play Music. import songqueue import view import writer from os.path import expanduser, join # Location where we keep songs, playlists, libraries, and source code. DATA_DIR = join(expanduser('~'), '.local', 'share', 'pmcli') # Location where we keep user and mpv configurations. CONFIG_DIR = join(expanduser('~'), '.config', 'pmcli') q = songqueue.Queue() # Queue/playlist. w = writer.Writer(None, None, None, None, curses=False) # Output handler. v = view.View() # Main window contents.
import argparse

# Command-line interface: input record, form template, output path.
parser = argparse.ArgumentParser(
    description=
    "This will take a court record pdf from CIPRS software and use the data in the pdf to write an expungement petition form pdf."  # noqa
)
parser.add_argument("input_path",
                    help="The path to the CIPRS court record pdf")
parser.add_argument(
    "template_path",
    help=
    "The path to the empty expungement petition form with field annotations.",
)
parser.add_argument("output_path",
                    help="The path desired for the filled petition form.")
args = parser.parse_args()

input_path = args.input_path
template_path = args.template_path
output_path = args.output_path

# Extract the court record's text and parse it into a dict.
pdf = reader.PDFToTextReader(input_path)
pdf.parse()
# BUG FIX: this previously rebound the name `json` to the parsed dict
# (`json = json.loads(...)`), shadowing the json module for the rest of
# the script. Use a distinct name for the parsed record.
record = json.loads(pdf.json())

# Fill the petition form's annotated fields and write the output pdf.
petition = writer.Writer(record, map_data, template_path, output_path)
petition.get_annotations()
petition.write()
import reader
import writer
import algorithm


def main():
    """Solve dataset `e_high_bonus` and write the resulting ride assignment."""
    # FIX: resolved an unshipped git merge conflict (<<<<<<< / ======= /
    # >>>>>>> markers). Kept the HEAD variant: bare dataset names with the
    # path prefixed at read time, and the output name derived from the
    # input file's stem.
    filenames = ('a_example.in', 'b_should_be_easy.in', 'c_no_hurry.in',
                 'd_metropolis.in', 'e_high_bonus.in')
    filename = filenames[4]
    out_filename = '../output/{}.out'.format(filename[:filename.index('.')])
    r = reader.Reader('../dataset/' + filename)
    # FIX: a GreedyAlgorithm instance was constructed and immediately
    # overwritten by RideAssigner; only the surviving assignment is kept.
    a = algorithm.RideAssigner(r.get_rides(), r.get_meta_info())
    a.assign_rides()
    w = writer.Writer(a.assigned_rides, out_filename)


if __name__ == '__main__':
    main()
def __init__(self, outputfile, tokens):
    """Create the code writer and the symbol bookkeeping for one class."""
    self.__writer = writer.Writer(outputfile)
    # Running counters for static/field slots and generated labels.
    self.__lut = dict(static=[0], field=[0], labels=[0])
    # tokens[1] holds the class name from the declaration.
    self.__name = tokens[1]
async def __login(
    self,
    LoginId,
    LoginPw,
):
    """Authenticate against the HTTP API, open the chat-gateway socket and
    start the writer, receiver and heartbeat tasks."""
    r = json.loads(
        httpApi.Login(LoginId, LoginPw, self.device_name, self.device_uuid))
    # Status -101: the account is logged in elsewhere (the messages ask the
    # user to log out on the other PC). Status -100: this device is not
    # registered yet (run RegisterDevice.py).
    if r["status"] == -101:
        print("다른곳에 로그인 되있습니다.")
        print("로그인 되있는 PC에서 로그아웃 해주세요")
    elif r["status"] == -100:
        print("디바이스 등록이 안 되어 있습니다.")
        print("RegisterDevice.py를 실행해주세요")
    # Any non-zero status is fatal: stop the loop and surface the payload.
    if r["status"] != 0:
        self.loop.stop()
        raise Exception(str(r))
    self.__accessKey = r["access_token"]
    # print(self.__accessKey)
    # Booking/check-in exchange yields the host/port of the chat gateway.
    bookingData = booking.getBookingData().toJsonBody()
    checkInData = checkIn.getCheckInData(
        bookingData["ticket"]["lsl"][0],
        bookingData["wifi"]["ports"][0]).toJsonBody()
    self.__StreamReader, self.__StreamWriter = await asyncio.open_connection(
        checkInData["host"], int(checkInData["port"]))
    self.__crypto = cryptoManager.CryptoManager()
    self.__writer = writer.Writer(self.__crypto, self.__StreamWriter,
                                  self.packetDict)
    # Initial LOGINLIST packet advertising client/app metadata.
    LoginListPacket = Packet(
        0, 0, "LOGINLIST", 0,
        bson.encode({
            "appVer": "3.1.4",
            "prtVer": "1",
            "os": "win32",
            "lang": "ko",
            "duuid": self.device_uuid,
            "oauthToken": self.__accessKey,
            "dtype": 1,
            "ntype": 0,
            "MCCMNC": "999",
            "revision": 0,
            "chatIds": [],
            "maxIds": [],
            "lastTokenId": 0,
            "lbk": 0,
            "bg": False,
        }))
    # Handshake first, then start the sender, receiver and heartbeat loops.
    self.__StreamWriter.write(self.__crypto.getHandshakePacket())
    self.loop.create_task(self.__writer.sendPacket(LoginListPacket))
    self.loop.create_task(self.__recvPacket())
    self.loop.create_task(self.__heartbeat())
# others parser.add_argument('--seed', type=int, default=1) args = parser.parse_args() args.work_dir = osp.dirname(osp.realpath(__file__)) args.out_dir = osp.join(args.work_dir, 'logs', 'spiralnet') args.checkpoints_dir = osp.join(args.work_dir, 'ckpts', 'spiralnet', args.exp_name) print(args) utils.makedirs(args.out_dir) utils.makedirs(args.checkpoints_dir) writer = writer.Writer(args) device = torch.device( 'cuda', args.device_idx) if torch.cuda.is_available() else torch.device('cpu') torch.set_num_threads(args.n_threads) # deterministic torch.manual_seed(args.seed) cudnn.benchmark = False cudnn.deterministic = True # load dataset template_fp = osp.join('./template', 'L_Hipp_template_om_2922.ply') meshdata = SpiralNetDatasets(args.train_data, args.test_data, template_fp,
import finder, run_comparison, writer

if __name__ == "__main__":
    # One scanner per drive root.
    drive_d = finder.Finder("D:\\")
    drive_c = finder.Finder("C:\\")
    # One writer per drive's result location; start both before comparing.
    out_d = writer.Writer("D:\\test\\", "D")
    out_c = writer.Writer("D:\\test\\run_2", "C")
    out_d.start()
    out_c.start()
    # Compare 10000 entries from each drive and record the results.
    run_comparison.run_comparison(10000, drive_d, out_d)
    run_comparison.run_comparison(10000, drive_c, out_c)
import neuralnet import numpy as np import click import writer writer = writer.Writer() @click.group() def main(): pass @click.command(options_metavar='<options>') @click.option("--lam", type=click.FLOAT, default=1, help="The regularization amount [default 1]") @click.option( "--maxiter", default=250, type=click.INT, help= "The maximum iterations for chosen to minimise the cost function [default 250]" ) @click.option("--output", type=click.STRING, help="A file path to save the minimised parameters to") @click.option("--normalize", default=True, type=click.BOOL,
def __init__(self):
    """Load config, show the idle animation and start listening for buttons."""
    self.timer = False
    self.config = config.Config()
    self.writer = writer.Writer()
    # Idle screen until a button press arrives.
    render("swirl")
    buttons.listen(self.on_press)
classifier_choice, 'f-measure') else: #Experiment 1.5 args: -i ./img/experiment_1_5/ -s 2 -d ./data/CreateDebate/ classifier1_5 = copy.deepcopy(classifier_dict[classifier_choice]) unseen_target = rdr.select_topic( ) if args['unseen'] not in rdr.dir_lst else args['unseen'] print("foo bar") if (input('Reload data?') == 'y'): train_data = rdr.load_cd(topic, 'ALL') test_data = rdr.load_cd(unseen_target, 'ALL') train_data = equal_stance_proportions(train_data) test_data = equal_stance_proportions(test_data) if classifier_choice in baselines: wvm_gen = writer.Writer(train_data) wvmodel = Model_Wrapper(wvm_gen.skipgram(15, 4, 151)) test_arrays = wvmodel.vectorise_debate(test_data) else: wvmodel = Model_Wrapper(None, True) train_arrays = wvmodel.vectorise_debate(train_data) train_label = wvmodel.stance_to_n(train_data) test_label = wvmodel.stance_to_n(test_data) classifier1_5.fit(train_arrays, train_label) if classifier_choice in baselines: accuracy = classifier1_5.score(test_arrays, test_label) predicted = list(classifier1_5.predict(test_arrays)) else: sanitized_bodies = np.array([ re.sub('[^a-zA-z0-9\s]', '', p.body.lower())
# 爬取简书排行榜 base_url = 'http://jianshu.com' add_url = '/recommendations/notes' num = 0 while (True): try: first_page = requests.request('get', base_url + add_url).content soup = BeautifulSoup(first_page, "lxml") # 爬取标题、链接和内容 title_list = [k.get_text() for k in soup.select(".title")] url_list = [ 'http://www.jianshu.com/' + k['href'] for k in soup.select(".title") ] content_list = [j.get_text() for j in soup.select(".abstract")] # 输出成html文件 writer = writer.Writer(title_list, url_list, content_list) writer.write() try: # print(soup.select(".ladda-button")) add_url = soup.select(".ladda-button")[-1].get("data-url") except BaseException as err: print err break except Exception as e: print(e) break
def __init__(self):
    """Build the GTK editor window from the Builder file and wire up the
    drawing and writing areas."""
    # Resolve the application directory for both frozen (bundled
    # executable) and plain-source execution.
    if hasattr(sys, 'frozen'):
        self.local_path = os.path.realpath(os.path.dirname(sys.executable))
        sys.path.append(self.local_path)
    else:
        self.local_path = os.path.realpath(os.path.dirname(__file__))
    #self.__init_config()
    # Load the UI definition and connect its signal handlers to self.
    self.wtree_path = os.path.join(self.local_path, 'editor.ui')
    self.builder = Gtk.Builder()
    self.builder.add_from_file(self.wtree_path)
    self.builder.connect_signals(self)
    self.main_window = self.builder.get_object('main_window')
    self.main_window.connect('delete-event',
                             self.on_main_window_delete_event)
    self.main_window.connect('destroy', Gtk.main_quit)
    self.treeview = self.builder.get_object('treeview')
    self.treeview.connect('row-activated', self.on_treeview_row_activated)
    self.main_window.show_all()
    # Main box stays hidden until a project is loaded.
    self.main_box = self.builder.get_object('hbox1')
    self.main_box.hide()
    self.project_dialog = dialogs.ProjectDialog(self.builder,
                                                self.main_window)
    self.search_dialog = dialogs.SearchDialog(self.builder,
                                              self.main_window)
    self.file_list = self.builder.get_object('file_list')
    self.__load_config()
    self.main_box.show_all()
    # Text views share the configured font.
    self.teste = self.builder.get_object('viewport3')
    self.teste2 = self.builder.get_object('textview1')
    self.teste2.modify_font(Pango.FontDescription(fontDescription))
    self.teste3 = self.builder.get_object('textview2')
    self.teste3.modify_font(Pango.FontDescription(fontDescription))
    self.buf = self.teste3.get_buffer()
    self.panel_separator = self.builder.get_object("hpaned1")
    # Drawing surface configured from the project's background/font folders.
    self.drawing_area = drawer.Drawer(
        self.config_files,
        bg=os.path.join(self.config_path,
                        self.main_config['BackgroundFolder']),
        font=os.path.join(self.config_path,
                          self.main_config['FontFolder']))
    self.drawing_area.mask_event(Gdk.EventMask.BUTTON_PRESS_MASK)
    # The editable text buffer backing textview1.
    self.writing_area = writer.Writer()
    self.teste2.set_buffer(self.writing_area)
    self.writing_area.connect('changed', self.on_writing_area_changed)
    self.writing_area.connect('mark-set', self.on_writing_area_mark_set)
    # Keep the handler id so the signal can be blocked/disconnected later.
    self.s_id_1 = self.writing_area.connect(
        'modified-changed',
        self.on_writing_area_modified_changed)
    self.drawing_area.connect('draw', self.on_drawing_area_expose_event)
    self.drawing_area.show()
    self.teste.add(self.drawing_area)
    self.file_list_iter = []
    self.__init_file_list()