def load_lisp_core(filename="core.lisp"):
    # Load and evaluate core.lisp
    f = open(filename)
    reader = Reader(f.read(), filename)
    f.close()
    for expr in reader.read():
        expr.evaluate(core.scope)
def load(self):
    _path = os.path.join(self.root_dir, 'database', 'master_scol')
    with open(_path, 'rb') as _f:
        buf = Reader(_f)
        I, S = buf.read_int, buf.read_str

        def assert_int(i):
            if I() != i:
                buf.error("Not valid scenario data")

        count = I()  # total number of story chapters
        for _ in range(count):
            chapter_id = I()
            chapter = self.scenario[chapter_id] = Chapter()
            assert_int(0)
            chapter.id = chapter_id
            chapter.i = I()
            chapter.title = S()
            chapter.sections = []
            for i in range(I()):
                section = Section()
                section.prefix = I()
                section.title = S()
                section.battle_num = I()
                section.talks = []
                for j in range(I()):
                    section.talks.append(I())
                chapter.sections.append(section)
            if chapter_id < 400:
                assert_int(0xc8)
            else:
                assert_int(0xa1)
        buf.end()
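# A minimal sketch of the binary Reader interface the load() methods above appear
# to assume (read_int / read_str / seek / error / end). The 4-byte little-endian
# integers and the length-prefixed UTF-8 strings are assumptions for illustration,
# not taken from the original module.
import struct

class BinaryReader(object):
    def __init__(self, f):
        self._f = f

    def read_int(self):
        # assumed: 4-byte little-endian signed integer
        return struct.unpack('<i', self._f.read(4))[0]

    def read_str(self):
        # assumed: length-prefixed UTF-8 string
        n = self.read_int()
        return self._f.read(n).decode('utf-8')

    def seek(self, offset):
        self._f.seek(offset)

    def error(self, message):
        raise ValueError(message)

    def end(self):
        # assumed: nothing left to consume
        assert self._f.read(1) == b''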
def evaluate(expr):
    reader = Reader(expr)
    try:
        exprs = reader.read()
    except Exception, e:
        print e
        return
def message_affiche_choix(message, rcenter):
    texte = Reader(message, pos=(rcenter[0] - 290, rcenter[1] + 210),
                   width=cote_fenetre - 20, fontsize=16, height=80,
                   bg=(150, 150, 150), fgcolor=(20, 20, 20))
    # texte.TEXT = message
    continuer_2 = 1
    choix = "n"  # default value
    while continuer_2:
        for event in pygame.event.get():
            texte.show()
            if event.type == JOYBUTTONUP:
                if event.button == 0:
                    choix = "o"
                    continuer_2 = 0
                elif event.button == 1:
                    choix = "n"
                    continuer_2 = 0
            elif event.type == KEYDOWN:
                if event.key == K_n:
                    choix = "n"
                    continuer_2 = 0
                elif event.key == K_o:
                    choix = "o"
                    continuer_2 = 0
            elif event.type != KEYDOWN and event.type != MOUSEBUTTONDOWN and event.type != JOYBUTTONUP:
                continue
            elif event.type == QUIT:
                sys.exit()
    return choix
def main(unused_args):
    reader = Reader(split=0.9)
    x_train, y_train, x_test, y_test = reader.get_data(glob('../../WSJ-2-12/*/*.POS'))
    print('len(reader.word_to_id)', len(reader.word_to_id),
          'len(reader.tag_to_id)', len(reader.tag_to_id))
    print('len(x_train)', len(x_train), 'len(x_test)', len(x_test))
    best_misclass = 1.0
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-FLAGS.init_scale, FLAGS.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = RNNTagger(True, len(reader.word_to_id), len(reader.tag_to_id))
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mtest = RNNTagger(False, len(reader.word_to_id), len(reader.tag_to_id))
        tf.initialize_all_variables().run()
        saver = tf.train.Saver()
        for i in range(FLAGS.max_max_epoch):
            lr_decay = FLAGS.lr_decay ** max(i - FLAGS.max_epoch, 0.0)
            m.assign_lr(session, FLAGS.learning_rate * lr_decay)
            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity, _ = run_epoch(session, m, x_train, y_train, m.train_op, verbose=True)
            _, misclass = run_epoch(session, mtest, x_test, y_test, tf.no_op(), verbose=True)
            if misclass < best_misclass:
                best_misclass = misclass
                fname = 'dropout_double_rnn_tagger_' + str(best_misclass)
                saver.save(session, fname, global_step=i)
                print('saving', fname)
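# The train/test taggers above share weights through TF1's variable_scope reuse
# mechanism. A minimal, self-contained illustration of that pattern under the
# assumption of TensorFlow 1.x; the variable names here are illustrative only.
import tensorflow as tf

with tf.Graph().as_default():
    with tf.variable_scope("model", reuse=None):
        w_train = tf.get_variable("w", shape=[3], initializer=tf.zeros_initializer())
    with tf.variable_scope("model", reuse=True):
        w_test = tf.get_variable("w", shape=[3])  # returns the same underlying variable
    assert w_train is w_test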
def __init__(self, file_path, tree_name):
    """ Constructor """
    ## Execute the base class constructor
    Reader.__init__(self, file_path, tree_name)
def get_nuclide_from_file(self, file_name):
    try:
        reader = Reader(file_name)
        reader.read_z_a(self.z, self.a)
        nuclide = Nuclide(name=reader.nuclide_data.name,
                          z=int(reader.nuclide_data.z),
                          a=int(reader.nuclide_data.a))
        return nuclide
    except Exception, e:
        raise e
def loadData(self):
    reader = Reader()
    print('loading data')
    self.X_train, self.y_train, self.meta_train = self.prepareData(reader.getData(TRAIN))
    print('train data has been loaded!')
    self.X_valid, self.y_valid, self.meta_valid = self.prepareData(reader.getData(DEV))
    print('valid data has been loaded!')
    self.X_test, self.y_test, self.meta_test = self.prepareData(reader.getData(TEST))
    print('test data has been loaded!')
def __switch(self, xiinArgDict):
    """
    Traffic director.
    """
    from reader import Reader
    reader = Reader()

    # Write output
    if xiinArgDict.filename is not None:
        print('Starting xiin...')
        print('')
        with open(xiinArgDict.filename, 'w') as xiinArgDict.outputFile:
            reader.info(xiinArgDict)
    # Display output.
    elif xiinArgDict.display:
        print('Starting xiin...')
        print('')
        reader.info(xiinArgDict)
    elif xiinArgDict.grep is not None:
        print('Starting xiin...')
        print('')
        print('Searching files...')
        print('')
        self.grepXiinInfo(xiinArgDict.grep)
    elif xiinArgDict.upload is not None:
        # xiin.ftp = {'source': '', 'destination': '', 'uname': '', 'password': ''}
        from uploader import Uploader

        xiinArgDict.ftpSource = None
        xiinArgDict.ftpDestination = None
        xiinArgDict.ftpUname = None
        xiinArgDict.ftpPwd = None

        if len(xiinArgDict.upload) > 0:
            xiinArgDict.ftpSource = xiinArgDict.upload[0]
            xiinArgDict.ftpDestination = xiinArgDict.upload[1]
        if len(xiinArgDict.upload) > 2:
            # Legacy support
            if xiinArgDict.ftpUname in ('anon', 'anonymous'):
                pass
            else:
                xiinArgDict.ftpUname = xiinArgDict.upload[2]
                xiinArgDict.ftpPwd = xiinArgDict.upload[3]

        print('Starting xiin uploader...')
        print('')
        print('Uploading debugging information...')
        print('')
        uploader = Uploader()
        uploader.upload(xiinArgDict.ftpSource, xiinArgDict.ftpDestination,
                        xiinArgDict.ftpUname, xiinArgDict.ftpPwd)
    else:
        print('ERROR: Unknown')
        exit(7)
def main(size, path):
    checker = Checker(size)
    reader = Reader(checker, size)
    data = reader.read("data\\" + path)
    solver = Solver(checker, size)
    try:
        return solver.solve(data)
    except Exception as e:
        return str(e)
def main():
    SUCCESS = 0

    # argument declarations
    parser = argparse.ArgumentParser()
    parser.add_argument('--omitheader', '-m',
                        help='Do not show the header about pizza and kittens',
                        action='store_true', default=False)
    parser.add_argument('--hublist', help='Shows all available hubs',
                        action='store_true', default=False)
    parser.add_argument('--similar', '-s',
                        help='Displays similar hubs as a histogram',
                        nargs=1, metavar="hub_name")
    parser.add_argument('--alsoread', '-a',
                        help='Displays what else people read from this hub as a histogram',
                        nargs=1, metavar="hub_name")
    parser.add_argument('--max',
                        help='Print several hubs that maximize the score function e.g. --similar or --alsoread',
                        nargs=1, metavar="number_of_hubs", type=int)
    parser.add_argument('--min',
                        help='Print several hubs that minimize the score function e.g. --similar or --alsoread',
                        nargs=1, metavar="number_of_hubs", type=int)
    parser.add_argument('--company',
                        help='If a name is ambiguous, like yandex: it is a hub and a company, then enforce company interpretation',
                        action="store_true", default=False)
    args = vars(parser.parse_args())

    # check flags and delegate to src/reader.py and src/hubs_wrapper.py
    if len(sys.argv) == 1:
        print_header_hubs()
        parser.print_help()
        return SUCCESS

    if not args['omitheader']:
        print_header_hubs()

    if args['hublist']:
        Reader.print_hubs()
        return SUCCESS

    isCompany = False
    if args['company']:
        isCompany = True

    flag = None
    flagopts = None
    if args['max']:
        flag = "max"
        flagopts = args['max'][0]
    if args['min']:
        flag = "min"
        flagopts = args['min'][0]

    if args['similar']:
        hub_name = args['similar'][0]
        display_preferences(hub_name, isCompany, "similarity", flag, flagopts)
        return SUCCESS
    if args['alsoread']:
        hub_name = args['alsoread'][0]
        display_preferences(hub_name, isCompany, "inclusion", flag, flagopts)
        return SUCCESS
class Wayterm(object):

    def __init__(self):
        self.app_key = '1746312660'
        self.app_secret = 'a113b12f49266b12125f6df1f9808045'
        self.callback_url = 'http://wayterm.nerocrux.org/done'
        self.template = Template()
        self.url = Url()
        self.reader = Reader()
        self.token = {}
        if self._read_access_token():
            self.client = Client(self.app_key, self.app_secret, self.callback_url, self.token)
        else:
            self.client = Client(self.app_key, self.app_secret, self.callback_url)
            self.auth_url = self.url.shorten(self.client.authorize_url)
            print '[1] Open this url in your browser: ' + self.auth_url
            self.auth_code = raw_input('[2] Enter authorization code: ')
            self.client.set_code(self.auth_code)
            token = {
                'access_token': self.client.token['access_token'],
                'expires_at': self.client.token['expires_at'],
                'uid': self.client.token['uid'],
            }
            self._write_access_token(token)
            print 'Authorization done. Enjoy!'

    def _read_access_token(self):
        try:
            self.token = yaml.load(open(os.path.join(os.getenv('HOME'), '.wayterm.yaml')).read())
        except:
            return False
        return True

    def _write_access_token(self, token):
        stream = file(os.path.join(os.getenv('HOME'), '.wayterm.yaml'), 'w')
        yaml.dump(token, stream)

    def _init_print(self):
        self.reader.printfile('logo')

    def call(self, command):
        if command[0].lower() == 'exit':
            exit()
        if command[0].lower() == 'help':
            self.reader.printfile('help')
            return
        api = Api(self.client)
        api.call(command)
def main_test():
    reader = Reader()
    retainer = Retainer()
    params = reader.read()
    api_targets = []
    for name, param in params.items():
        parser = Parser(name, param)
        api_targets.append(parser)
    for target in api_targets:
        creater = Creater(target)
        api_parameters = creater.create()
        retainer.insert(target.name, api_parameters)
    retainer.close()
def __init__(self):
    reader = Reader()
    print('loading data')
    self.X_train = reader.getData(TRAIN)
    print('train data has been loaded!')
    self.X_valid = reader.getData(DEV)
    print('valid data has been loaded!')
    self.X_test = reader.getData(TEST)
    print('test data has been loaded!')
    self.c_title = []
    self.c_body = []
    self.bigram = Phrases.load('./data/bigram.dat')
    self.trigram = Phrases.load('./data/trigram.dat')
def load(self):
    _path = path.join(self.root_dir, 'database', 'master_card')
    with open(_path, 'rb') as _f:
        buf = Reader(_f)
        count = buf.read_int()  # number of cards
        offsets = [0] * count
        self.cards = cards = [None] * count
        for i in range(count):
            cards[i] = MACard()
            offsets[i] = buf.read_int()
        for i, card in enumerate(cards):
            buf.seek(offsets[i])
            card.load(buf)
def evaluate_file(filename):
    try:
        f = open(filename)
    except IOError:
        print "Cannot open file %s" % repr(filename)
        return
    reader = Reader(f.read())
    f.close()
    try:
        exprs = reader.read()
    except Exception, e:
        print e
        return
class Generator:

    def __init__(self, filename):
        self.reader = Reader(filename)

    def execute(self):
        self.reader.execute()
        datas = self.reader.getData()
        tps = [InterfaceTemplate(datas), ImplementTemplate(datas),
               TestTemplate(datas)]
        for tp in tps:
            print tp.execute()
class Notifier(wx.App):
    """main notifier app"""

    def __init__(self):
        wx.App.__init__(self, redirect=0)
        # menu handlers
        menu = [
            ("Show again", self.again),
            ("Settings", self.settings),
            ("Exit", self.exit),
        ]
        # main objects
        self.icon = Icon(menu)
        self.popup = Popup()
        self.reader = Reader(feeds=["http://digg.com/rss/index.xml"])
        # main timer routine
        timer = wx.Timer(self, -1)
        self.Bind(wx.EVT_TIMER, self.main, timer)
        timer.Start(500)
        self.MainLoop()

    def main(self, event):
        if not self.popup.opened():
            # show popup for next new item
            for item in self.reader.items():
                self.popup.show("%(feed)s\n%(title)s%(title)s%(title)s%(title)s%(title)s%(title)s%(title)s%(title)s" % item)
                status = "on"
                break
            else:
                status = "off"
            # set icon status
            self.icon.setStatus(status)

    def again(self):
        print "again"

    def settings(self):
        print "settings"

    def exit(self):
        # close objects and end
        self.reader.close()
        self.icon.close()
        self.Exit()
def main(opts):
    if not opts.demo:
        from reader import Reader
    global q
    delay = 0.001
    lights = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
    print opts
    if not opts.demo:
        reader = Reader(lights)
    sounder = Sounder()
    player = Player(sounder)
    songs = os.listdir("./songs/")
    songs = filter(lambda x: x.endswith(".js"), songs)
    numpy.random.shuffle(songs)
    print ("Loaded songs:")
    print (songs)
    for song in cycle(songs):
        # time.sleep(.5)
        q = player.song(song)
        while True:
            debug_str = ""
            time.sleep(delay)
            if not opts.demo:
                reader.fetch()
            if not q["play"].empty():
                player.chord(q["play"].get_nowait())
            for ch in lights:
                debug_str += "{}:{} ".format(ch, lights[ch])
                if lights[ch] < 650 or opts.demo:
                    sounder.start(get_str(ch))
                else:
                    sounder.stop(get_str(ch))
            # print(debug_str)
            time.sleep(delay)
            if q["sig"] == "stopped":
                print "caught sig... stopping song"
                sounder.mute()
                break
def test_extractLinks(self):
    print ">>> test_extractLinks"
    app = LinkCrawler()
    reader = Reader(app)
    configIsLoaded = app.loadConfigurationSite("unittest")
    if configIsLoaded is False:
        self.fail("load configuration failed")
    else:
        response = reader.getResponse('http://www.scandio.de')
        responseData = response[3]
        links = reader.extractLinks(responseData, 'http://www.scandio.de')
        print "<<< test_extractLinks [links: %s]\n" % links
        self.failUnless(links)
def message_short(message, rcenter):
    # texte.TEXT = message
    texte = Reader(message, pos=(rcenter[0] - 290, rcenter[1] + 210),
                   width=cote_fenetre - 20, fontsize=16, height=80,
                   bg=(150, 150, 150), fgcolor=(20, 20, 20))
    continuer_2 = 1
    tps_deb = time.time() + 2
    while continuer_2:
        if time.time() >= tps_deb:
            continuer_2 = 0
        for event in pygame.event.get():
            texte.show()
            if event.type == QUIT:
                sys.exit()
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                sys.exit()
def run_parser(filepath, request):
    print >>sys.stderr, 'Parsing started with {}'.format(filepath)
    base_file_path = os.getcwd()
    try:
        padding = 300
        file_type = check_file_type(filepath)
        if file_type == 'file error':
            error = 'This file is not the correct input type, please try an LRG or GB file'
            document_list = created_documents.objects.order_by('-created_on')
            return render(request, 'web_interface/app_homepage.html',
                          {'form': UploadForm(), 'document_list': document_list,
                           'error_message': error})
        dictionary = {}
        if file_type == 'gbk':
            gbk_reader = GbkParser(filepath, padding, True)
            dictionary = gbk_reader.run()
            parser_details = gbk_reader.get_version
        elif file_type == 'lrg':
            lrg_reader = LrgParser(filepath, padding, True)
            dictionary = lrg_reader.run()
            parser_details = lrg_reader.get_version
        parser_details = '{0} {1} {2}'.format(file_type.upper(), 'Parser:', parser_details)
        os.chdir('web_interface')
        os.chdir('output')
        for transcript in dictionary['transcripts']:
            input_reader = Reader()
            writer = LatexWriter()
            reader_details = 'Reader: ' + input_reader.get_version
            writer_details = 'Writer: ' + writer.get_version
            xml_gui_details = 'Control: {}'.format(get_version)
            list_of_versions = [parser_details, reader_details, writer_details, xml_gui_details]
            input_list, nm = input_reader.run(dictionary, transcript, True, list_of_versions,
                                              True, file_type, 'web user')
            transcript_accession = dictionary['transcripts'][transcript]['NM_number']
            file_name = transcript_accession
            latex_file, pdf_file = writer.run(input_list, file_name)
            call(["pdflatex", "-interaction=batchmode", latex_file])
            save_as_model = created_documents(transcript=transcript_accession,
                                              location=pdf_file,
                                              gene=dictionary['genename'],
                                              created_on=datetime.now())
            save_as_model.save()
            delete_all_but_most_recent()
    except:
        os.chdir(base_file_path)
    os.chdir(base_file_path)
    clean_up(os.path.join('web_interface', 'output'))
    clean_up(os.path.join('web_interface', 'input'))
class Generator:

    def __init__(self, configFile, templetes):
        self.reader = Reader(configFile)
        self.templetes = templetes

    def _templateList(self):
        datas = self.reader.getData()
        return map(lambda tp: self._template_import__private(tp)(datas), self.templetes)

    def _template_import__private(self, model, filename=None):
        if not filename:
            filename = model.lower()
        name = "%sTemplate" % (model)
        mod = __import__("lib.template.model.%s" % filename, fromlist=[name])
        return getattr(mod, name)

    def _write(self, files):
        path = "dist"
        if not os.path.exists(path):
            os.makedirs(path)
        for name, body in files.iteritems():
            f = open("%s/%s.java" % (path, name), "w")
            f.write(body)
            f.close()
        print "Generated file to %s" % os.path.abspath(path)

    def execute(self):
        self.reader.execute()
        tps = self._templateList()
        files = {}
        for tp in tps:
            body = tp.execute()
            m = re.search(r"public (class|interface) (\S+)", body)
            name = m.group(2)
            files[name] = body
        self._write(files)
class ReadLayer(object):

    def __init__(self, rng, h_shape, image_shape, N, name='Default_readlayer'):
        print('Building layer: ' + name)
        self.lin_transform = HiddenLayer(
            rng,
            n_in=h_shape[0] * h_shape[1],
            n_out=4,
            activation=None,
            irange=0.001,
            name='readlayer: linear transformation')
        self.reader = Reader(
            rng,
            image_shape=image_shape,
            N=N,
            name='readlayer: reader')
        self.params = self.lin_transform.params

    def one_step(self, h, image):
        linear = self.lin_transform.one_step(h)
        read, g_x, g_y, delta, sigma_sq = self.reader.one_step(linear, image)
        return read, g_x, g_y, delta, sigma_sq
def __init__(self, config):
    ## define class attributes
    self.logger = logging.getLogger("Viewer")
    self.data = None                           # current data to show
    self.header = None                         # first line of file to use as header
    self.title = None                          # filename
    self.useHeader = config['header']          # if using first line as header
    self.useRegex = config['regex']            # if searching with regular expression
    self.searchText = config['search']         # initial search string (may be empty)
    self.separator = str(config['separator'])  # column separator (e.g. comma, tab, space)

    # handle highlighting of alternate lines
    self.highlight = config['highlightLines']
    #self.highlight_color = wx.Colour(242, 242, 242)  # light gray for light themes
    self.highlight_color = wx.Colour(32, 32, 32)      # dark gray for dark themes

    self.text = None        # search box object
    self.regex_text = None  # regex checkbox object
    self.header_box = None  # header checkbox object

    self.reader = Reader(config)
    chunk = self.reader.next()
    self.ReadData(chunk)
    self.header = self.data[0]
    self.title = config['filename'].split('/')[-1]
    self.InitViewer()
def feeds():
    reader = Reader()
    for feed in config["feeds"]:
        reader.add(feed)
    reader.run()
    stories = reader.stories[0:100]
    return make_response(
        json.dumps({"count": len(stories),
                    "stories": stories,
                    "template": file_get_contents("templates/stories.hbs")}),
        200,
        {"Content-type": "application/json"},
    )
def __init__(self, path):
    self.path = path
    self.queue = Queue.PriorityQueue()
    self.watcher = Watcher(path, self.queue)
    self.walker = Walker(path, self.queue, Settings.is_rescan_forced())
    self.reader = Reader(self.queue)
    self.validator = Validator(self.queue)
def __init__(self, log=None):
    """Initialize a reusable instance of the class."""
    super(Parser, self).__init__()
    self._reader = Reader()
    self.log = log
    if log:
        self._init_logging()
def compute(hubname, isCompany, fun_name):
    hub_readers = Reader.check_and_download(hubname, isCompany)
    hubs_data_dir = "data/hubs/"
    tocut = len(hubs_data_dir)
    hubs = glob.glob(hubs_data_dir + "*")
    similarity_dict = dict()
    for hub_file in hubs:
        readers = Reader.read_list_of_users(hub_file)
        hub = hub_file[tocut:]
        # skip itself
        if hub == hubname:
            continue
        if fun_name == "similarity":
            similarity_dict[hub] = jaccard_index(hub_readers, readers)
        if fun_name == "inclusion":
            similarity_dict[hub] = inclusion(hub_readers, readers)
    return similarity_dict
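# jaccard_index and inclusion are called above but not shown. A plausible sketch,
# assuming both arguments are collections of user names; these definitions are
# illustrative stand-ins, not taken from the original module.
def jaccard_index(a, b):
    """|A ∩ B| / |A ∪ B| -- symmetric overlap of two audiences."""
    a, b = set(a), set(b)
    union = a | b
    return len(a & b) / float(len(union)) if union else 0.0

def inclusion(a, b):
    """|A ∩ B| / |A| -- fraction of audience A that also reads B."""
    a, b = set(a), set(b)
    return len(a & b) / float(len(a)) if a else 0.0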
def test_completeRelativePath(self):
    print ">>> test_completeRelativePath\n"
    app = LinkCrawler()
    reader = Reader(app)
    array = [['/en/de/index.html', '/aa/bb/index.htm'],
             ['/en/de/index.html', 'cc/dd/index.htm'],
             ['/en/de/index.html', '../cc/dd/index.htm'],
             ['/en/', 'search_iframe_en.htm'],
             ['/en/', '/search_iframe_en.htm'],
             ['/en/', '../search_iframe_en.htm']]
    for i in range(0, len(array)):
        relPath = reader.completeRelativePath(array[i][0], array[i][1])
        print "test_completeRelativePath [path: %s, parent: %s, relPath: %s]\n" % (array[i][0], array[i][1], relPath)
        self.failUnless(relPath)
    print "<<< test_completeRelativePath \n"
def test_from_file(filepath):
    reader = Reader()
    model = DynamicLSTM(None, is_training=False, reuse=False)
    model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="model")
    model_saver = tf.train.Saver(model_variables)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        ckpt_path = tf.train.latest_checkpoint(config.model_dir)
        if ckpt_path:
            model_saver.restore(sess, tf.train.latest_checkpoint(config.model_dir))
            print("Read model parameters from %s" % tf.train.latest_checkpoint(config.model_dir))
        else:
            print("model doesn't exist")
            model_path = os.path.join(config.model_dir, config.model_name)
            model_saver.save(sess, model_path, global_step=0)
        data_gen = reader.get_custom_line_from_file(filepath)
        for inputs, inputs_len in data_gen:
            feed_dict = {model.x: inputs, model.x_len: inputs_len}
            prob = sess.run([model.output_prob], feed_dict=feed_dict)
def decode_user_command(record):
    rdr = Reader(BytesIO(record.value))
    k_rdr = Reader(BytesIO(record.key))
    cmd = {}
    cmd['type'] = rdr.read_int8()
    cmd['str_type'] = decode_user_cmd_type(cmd['type'])

    if cmd['type'] == 5 or cmd['type'] == 7:
        cmd['user'] = k_rdr.read_string()
        cmd['cred'] = {}
        cmd['cred']['version'] = rdr.read_int8()
        cmd['cred']['salt'] = rdr.read_iobuf().hex()
        cmd['cred']['server_key'] = rdr.read_iobuf().hex()
        cmd['cred']['stored_key'] = rdr.read_iobuf().hex()
        # obfuscate secrets
        cmd['cred']['salt'] = obfuscate_secret(cmd['cred']['salt'])
        cmd['cred']['server_key'] = obfuscate_secret(cmd['cred']['server_key'])
        cmd['cred']['stored_key'] = obfuscate_secret(cmd['cred']['stored_key'])
    elif cmd['type'] == 6:
        cmd['user'] = k_rdr.read_string()
    return cmd
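# obfuscate_secret is referenced above but not defined here. A minimal sketch of
# what such a helper might do (keep a short prefix, mask the rest); this is an
# assumption for illustration, not the actual implementation.
def obfuscate_secret(value, keep=3):
    """Return the first `keep` characters of the hex string followed by a fixed mask."""
    return value[:keep] + '...'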
def model(self):
    X_reader = Reader(self.X_train_file, name='X',
                      image_size=self.image_size, batch_size=self.batch_size)
    Y_reader = Reader(self.Y_train_file, name='Y',
                      image_size=self.image_size, batch_size=self.batch_size)
    x = X_reader.feed()
    y = Y_reader.feed()

    cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)

    # X -> Y
    fake_y = self.G(x)
    G_gan_loss = self.generator_loss(self.D_Y, fake_y, use_lsgan=self.use_lsgan)
    G_loss = G_gan_loss + cycle_loss
    D_Y_loss = self.discriminator_loss(self.D_Y, y, self.fake_y, use_lsgan=self.use_lsgan)

    # Y -> X
    fake_x = self.F(y)
    F_gan_loss = self.generator_loss(self.D_X, fake_x, use_lsgan=self.use_lsgan)
    F_loss = F_gan_loss + cycle_loss
    D_X_loss = self.discriminator_loss(self.D_X, x, self.fake_x, use_lsgan=self.use_lsgan)

    # Summary
    tf.summary.histogram('D_Y/true', self.D_Y(y))
    tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
    tf.summary.histogram('D_X/true', self.D_X(x))
    tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))
    tf.summary.scalar('loss/G', G_gan_loss)
    tf.summary.scalar('loss/D_Y', D_Y_loss)
    tf.summary.scalar('loss/F', F_gan_loss)
    tf.summary.scalar('loss/D_X', D_X_loss)
    tf.summary.scalar('loss/cycle', cycle_loss)
    tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
    tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(x))))
    tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
    tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(y))))

    return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
class Explorer:

    def __init__(self, url):
        self._reader = Reader(url)
        if self._reader is None:
            return None
        if self._reader._soup is None:
            self._reader = None
            return None

    def read_pretty(self):
        print(self._reader._soup.get_text())

    def write_to_file(self, file_name):
        if self._reader is None:
            return None
        text = self._reader._soup.get_text()
        write_text_to_file(file_name, text)

    def explore_links_from_articles(self, file_name):
        if self._reader is None:
            return None
        links = self._reader.links_to_articles_reader()
        f = open(file_name, "a")
        for element in links:
            f.write(str(element))
            f.write('\n')
        f.close()

    def explore_authors(self, file_name):
        if self._reader is None:
            return None
        authors = self._reader.read_comments_authors()
        if len(authors) > 10:
            for i in range(10):
                write_text_to_file(str(i) + file_name, authors[i])
def __most_expensive_node_car(car_number, solution, data: Reader):
    """
    Take the node with the highest travel and delivery cost in a car.

    Returns:
        index of node in solution
    """
    vehicleDict = data.getVehiclesDict()
    vertexDict = data.getVertexDict()
    callsDict = data.getCallsDict()
    nodeDict = data.getNodes()

    home, _, _ = vehicleDict[car_number]
    cur_node = home
    car_index = car_number
    started_calls = []
    record_cost = 0
    record_index = 0

    start, stop = __get_car_index(car_number, solution, data.num_cars)
    for index, call in enumerate(solution[start:stop + 1]):
        assert call != 0
        (origin, dest, _, failCost, _, _, _, _) = callsDict[call]
        _, origin_cost, _, dest_cost = nodeDict[(car_index, call)]
        if call not in started_calls:
            started_calls.append(call)
            cur_cost = origin_cost
            next_node = origin
        else:
            started_calls.remove(call)
            cur_cost = dest_cost
            next_node = dest
        _, travel_cost = vertexDict[(car_index, cur_node, next_node)]
        cur_cost = cur_cost + travel_cost
        cur_node = next_node
        if cur_cost > record_cost:
            record_cost = cur_cost
            record_index = index

    assert solution[record_index + start] == solution[start:stop + 1][record_index]
    return record_index + start
def evaluate(source, env):
    if isinstance(source, str):
        return env.find(source)[source]
    elif not isinstance(source, list):
        return source
    elif source[0] == 'define':
        key, value = source[1], evaluate(source[2], env)
        env[key] = value
        return "'{}' => {}".format(key, value)
    elif source[0] == 'quote':
        return source[1]
    elif source[0] == 'cons':
        new = evaluate(source[1], env)
        lst = [evaluate(expr, env) for expr in source[2:]][0]
        # lst = evaluate(source[2:], env)
        lst.insert(0, new)
        return lst
    elif source[0] == 'car':
        return evaluate(evaluate(source[1], env)[0], env)
    elif source[0] == 'cdr':
        return evaluate(source[1], env)[1:]
    elif source[0] == 'cond':
        for statement in source[1:]:
            cond, expr = statement[0], statement[1]
            if cond == "else" or evaluate(cond, env):
                return evaluate(expr, env)
        raise SyntaxError("Invalid syntax of 'cond'")
    elif source[0] == 'lambda':
        def gen_body_of_lambda(exps, vargs, args):
            inner_env = Env(vargs, args, env)
            for exp in exps:
                result = evaluate(exp, inner_env)
            return result

        vargs, expr = source[1], source[2:]
        return lambda args: gen_body_of_lambda(expr, vargs, args)
    elif source[0] == 'load':
        match = re.search(re.compile(r'(?<=\").*(?=\")'), source[1])
        file_path = match.group(0)
        with open(file_path) as f:
            module_contents = Reader.parse(f.read())
        for target in module_contents:
            print(evaluate(target, env))
    elif source[0] == 'eq?':
        return evaluate(source[1], env) == evaluate(source[2], env)
    else:
        operator = env.find(source[0])[source[0]]
        args = [evaluate(expr, env) for expr in source[1:]]
        return operator(args) if callable(operator) else evaluate(operator, env)
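# A short, assumed usage of the evaluator above, with a dict standing in for the
# Env class (which only needs find() and item assignment here). The DictEnv stub
# and the sample expressions are illustrative, not from the source.
class DictEnv(dict):
    def find(self, name):
        return self  # single flat scope for this sketch

env = DictEnv({'+': lambda args: sum(args)})
print(evaluate(['define', 'x', 3], env))             # "'x' => 3"
print(evaluate(['+', 'x', 4], env))                  # 7
print(evaluate(['cdr', ['quote', [1, 2, 3]]], env))  # [2, 3]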
def __init__(self, n, file):
    self.path_width = 4      # Width of the paths drawn on screen (px)
    self.n = n               # Number of circles
    self.upr = 100           # Updates per rotation; for upr=50 it takes 50 updates to reach a full rotation
    self.t = 0               # 'time' passed since start
    self.width, self.height = 1920, 1080  # Width and height of screen shown on screen/video
    self.fullscreen = True
    self.video_mode = False  # Video mode creates a video of the drawing
    self.fps = 40            # Frames per second in video
    self.video_length = 200  # Number of frames in video
    self.finished = False
    self.framecount = 0

    if self.video_mode:
        fourcc = VideoWriter_fourcc(*'MP42')
        self.video = VideoWriter('C:\\Koding\\Scripts\\Python\\Fourier Series\\Videoer\\'
                                 + file + '_' + str(self.n) + '.avi',
                                 fourcc, float(self.fps), (self.width, self.height))

    self.root = Tk()  # Main window
    if self.fullscreen:
        self.root.attributes("-fullscreen", True)  # set fullscreen
    else:
        self.root.geometry(str(self.width) + "x" + str(self.height))  # Set width and height of window
    self.root.bind("<Escape>", self.close)  # Pressing escape closes the window

    self.canvas = Canvas(self.root, width=self.width, height=self.height)  # Canvas to draw on
    self.canvas.pack()
    self.canvas.create_text(100, 100, text="n=" + str(n), font="Times 50 italic bold")

    self.reader = Reader(self.n, self.upr, self.width, self.height, self.path_width)
    self.paths = self.reader.read_svg(file + ".svg")  # Read SVG file and calculate vectors

    for path in self.paths:  # Draw start position of all vectors on the canvas
        for vector in path.vectors:
            vector.create(self.canvas)

    self.root.after(1, self.frame)  # Run first frame after 1ms
    self.root.mainloop()  # Start window
def create_all_csvs(r: Reader, a: Analyzer):
    diffs = defaultdict(list)
    diffs_sq = defaultdict(list)
    for filename in get_all_log_files():
        r.load_log_file(filename)
        out_folder = "csvs/{}".format(filename[:-4])
        if not os.path.exists(out_folder):
            os.mkdir(out_folder)
        all_csvs = a.get_all_csv_list()
        for key, csv_list in all_csvs.items():
            vals = csv_list[1].split(",")[-2:]
            avg_diff = int(float(vals[0]))
            avg_diff_sq = int(float(vals[1]))
            diffs[key].append(avg_diff)
            diffs_sq[key].append(avg_diff_sq)
            with open("{}/{}.csv".format(out_folder, key.lower()), "w") as file:
                for line in csv_list:
                    file.write(line)
    avgs = {}
    avgs_sq = {}
    for key, l in diffs.items():
        avgs[key] = int(sum(l) / len(l))
    for key, l in diffs_sq.items():
        avgs_sq[key] = int(sum(l) / len(l))
    print("AVG:", avgs)
    print("AVG SQS:", avgs_sq)
def test_7_get_timestamps_for_epoch(self):
    os.chdir(rootwd)
    for key in self.file_nostim:
        reader_io_nostim = Reader(self.file_nostim[key])
    os.chdir(pwd)
    # draw out ordered epochs
    orderedepochs_soma = reader_io_nostim.drawout_orderedepochs('soma')
    orderedepochs_axon = reader_io_nostim.drawout_orderedepochs('axon')
    epoch_id = 0
    times_soma = reader_io_nostim.get_timestamps_for_epoch(epoch_id, orderedepochs_soma)
    times_axon = reader_io_nostim.get_timestamps_for_epoch(epoch_id, orderedepochs_axon)
    # self.assertEqual([len(times_soma), times_soma], [len(times_axon), times_axon])
    reader_io_nostim.closefile()
def _init_test_graph(self):
    # Initialize TFRecord reader
    testReader = Reader(tfrecordsFile=self.dataPath[0],
                        decodeImgShape=self.decodeImgShape,
                        imgShape=self.inputShape,
                        batchSize=1,
                        minQueueExamples=1,
                        name='test')

    # Batch for test data
    imgTest, _, self.img_name_test, self.user_id_test = testReader.batch(
        multi_test=self.multi_test, use_advanced=self.advanced_multi_test)

    # Convert the shape [?, self.num_try, H, W, 1] to [self.num_try, H, W, 1] for multi-test
    if self.multi_test:
        if self.advanced_multi_test:
            shape = [6 * 2 * self.num_try, *self.outputShape]
        else:
            shape = [2 * self.num_try, *self.outputShape]
    else:
        shape = [1, *self.outputShape]

    self.imgTests = tf.reshape(imgTest, shape=shape)
    self.predTest = self.forward_network(inputImg=self.normalize(self.inputImgPh), reuse=True)
def test_1_init(self):
    # loads nwbfile, extracts modelname, modelscale & instantiates model
    os.chdir(rootwd)
    for key in self.file_nostim:
        reader_io_nostim = Reader(self.file_nostim[key])
    for key in self.file_stim:
        reader_io_stim = Reader(self.file_stim[key])
    os.chdir(pwd)
    # reader_io_nostim.chosenmodel = self.chosenmodel
    reader_io_stim.chosenmodel = self.chosenmodel
    # print self.chosenmodel.name
    # this tests extract_modelname_modelscale & load_model
    compare1 = [reader_io_nostim.modelname,             # output of
                reader_io_nostim.modelscale]            # extract_modelname_modelscale()
    compare2 = [reader_io_stim.chosenmodel.modelname,   # output of
                reader_io_stim.chosenmodel.modelscale]  # load_model()
    self.assertEqual(compare1, compare2)
    reader_io_nostim.closefile()
    reader_io_stim.closefile()
def get_model(num_hid_layers, cells_per_layer, dropout_rate):
    length = Reader.getInputShape()
    model = Sequential()
    model.add(Dense(cells_per_layer, input_shape=(length,), activation='relu'))
    model.add(Dropout(dropout_rate))
    for i in range(num_hid_layers):
        model.add(Dense(cells_per_layer, activation='relu'))
        model.add(Dropout(dropout_rate))
    model.add(Dense(5, activation='softmax'))  # softmax for multiclass, sigmoid for 2-class

    model_name = (models_directory + 'MLP.hidlay' + str(num_hid_layers)
                  + '.cells' + str(cells_per_layer) + '.drop' + str(dropout_rate))
    plot_model(model, to_file=model_name + '.2class' + '.png', show_shapes=True)
    fp_model = open(model_name + '.2class' + '.json', 'w+')
    fp_model.write(model.to_json())
    fp_model.close()
    return model
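# A minimal, assumed way to train the model returned above; the hyperparameter
# values, optimizer, loss, and the X_train/y_train arrays are placeholders for
# illustration, not taken from the source.
model = get_model(num_hid_layers=2, cells_per_layer=64, dropout_rate=0.3)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(X_train, y_train, epochs=10, batch_size=32, validation_split=0.1)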
def model(self):
    X_reader = Reader(X_TRAIN_FILE, name='X')
    Y_reader = Reader(Y_TRAIN_FILE, name='Y')
    x = X_reader.feed()
    y = Y_reader.feed()

    cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)

    # X -> Y
    G_gan_loss = self.generator_loss(self.G, self.D_Y, x, use_lsgan=self.use_lsgan)
    G_loss = G_gan_loss + cycle_loss
    D_Y_loss = self.discriminator_loss(self.G, self.D_Y, x, y, use_lsgan=self.use_lsgan)

    # Y -> X
    F_gan_loss = self.generator_loss(self.F, self.D_X, y, use_lsgan=self.use_lsgan)
    F_loss = F_gan_loss + cycle_loss
    D_X_loss = self.discriminator_loss(self.F, self.D_X, y, x, use_lsgan=self.use_lsgan)

    # summary
    tf.summary.histogram('D_Y/true', self.D_Y(y))
    tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
    tf.summary.histogram('D_X/true', self.D_X(x))
    tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))
    tf.summary.scalar('loss/G', G_gan_loss)
    tf.summary.scalar('loss/D_Y', D_Y_loss)
    tf.summary.scalar('loss/F', F_gan_loss)
    tf.summary.scalar('loss/D_X', D_X_loss)
    tf.summary.scalar('loss/cycle', cycle_loss)
    tf.summary.image('X/generated', utils.batch_convert2int(self.G(x)))
    tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(x))))
    tf.summary.image('Y/generated', utils.batch_convert2int(self.F(y)))
    tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(y))))

    self.summary = tf.summary.merge_all()
    self.saver = tf.train.Saver()
    return G_loss, D_Y_loss, F_loss, D_X_loss
def infer(img_path, model_path, image_shape, label_dict_path):
    # Load the label dictionary
    char_dict = load_dict(label_dict_path)
    # Load the reversed label dictionary
    reversed_char_dict = load_reverse_dict(label_dict_path)
    # Dictionary size
    dict_size = len(char_dict)
    # Build the reader
    my_reader = Reader(char_dict=char_dict, image_shape=image_shape)
    # Initialize PaddlePaddle
    paddle.init(use_gpu=True, trainer_count=1)
    # Build the network model
    model = Model(dict_size, image_shape, is_infer=True)
    # Load the trained parameters
    parameters = paddle.parameters.Parameters.from_tar(gzip.open(model_path))
    # Create the inferer
    inferer = paddle.inference.Inference(output_layer=model.log_probs, parameters=parameters)
    # Crop the license plate
    # cutPlateNumber = CutPlateNumber()
    # cutPlateNumber.strat_crop(img_path, True)
    # Load the cropped plate image
    test_batch = [[my_reader.load_image(img_path)]]
    # Run inference
    return start_infer(inferer, test_batch, reversed_char_dict)
class StanzaShuffler():

    def __init__(self, split_path):
        self.path = split_path
        self.reader = Reader()
        self.data = [[d[0].split(' </br> '), d[1]]
                     for d in self.reader.read_from_split(split_path)]

    def shuffle_data(self):
        result = []
        for stanza in self.data:
            tmp = stanza[0]
            shuffle(tmp)
            lines = ''
            for line in tmp:
                lines = lines + ' </br> ' + line
            lines = lines[7:]
            result.append([lines, stanza[1]])
        return result + self.reader.read_from_split(self.path)

    def save_data(self, save_path):
        data = self.shuffle_data()
        shuffle(data)
        with open(save_path, 'w', encoding='utf-8', newline='') as out_file:
            tsv_writer = csv.writer(out_file, delimiter='\t')
            tsv_writer.writerows(data)
def testExampleA(self):
    self.maxDiff = None
    Reader(self.mock_file)
    expect_output = 'outdir/file_0'
    with open(self.mock_expected) as f:
        mock_expected = f.read()
    mock_expected = mock_expected.split('\n')
    with open(expect_output) as f:
        actual_output = f.read()
    actual_output = actual_output.split('\n')
    self.assertItemsEqual(mock_expected, actual_output)
def runKNN_DTW(self):
    # Load the reader with the training-set values and the row to be tested
    reader = Reader(self.nomeCjTreino, self.vetordeteste)
    matrizDTW = reader.RunDTW()
    # Sort the matrix of distances between the test row and the training set in ascending order
    matrizDTW.sort(key=lambda tup: tup[1])
    ocorrencias = []
    resultado = []
    # Iterate over the vector of k values (so the program computes everything at once
    # instead of recalculating for each value of k)
    for i in range(len(self.k)):
        resultadotemp = []
        # Take the K nearest neighbours and see which class occurs most often
        for x in range(self.k[i]):
            ocorrencias.append(matrizDTW[x][0])
        c = Counter(ocorrencias).most_common()
        resultadotemp.append(self.k[i])
        resultadotemp.append(c[0][0])
        resultado.append(resultadotemp)
    # Return the most common occurrence
    return resultado
def model(self):
    if self.X_train_file is not None and self.Y_train_file is not None:
        X_reader = Reader(self.X_train_file, name='X',
                          image_size=self.image_size, batch_size=self.batch_size)
        Y_reader = Reader(self.Y_train_file, name='Y',
                          image_size=self.image_size, batch_size=self.batch_size)
        x = X_reader.feed()
        y = Y_reader.feed()
    else:
        x = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, 3])
        y = tf.placeholder(dtype=tf.float32, shape=[self.batch_size, 3])

    # Loss function
    cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)
    print(cycle_loss)

    # X -> Y
    fake_y = self.G(x)  # __call__(input)
    print(fake_y)
    G_gan_loss = self.generator_loss(self.D_Y, fake_y, use_lsgan=True)
    print(G_gan_loss)
    G_loss = G_gan_loss + cycle_loss
    D_Y_loss = self.discriminator_loss(self.D_Y, y, self.fake_y, use_lsgan=True)

    # Y -> X
    fake_x = self.F(y)
    print(fake_x)
    F_gan_loss = self.generator_loss(self.D_X, fake_x, use_lsgan=True)
    F_loss = F_gan_loss + cycle_loss
    D_X_loss = self.discriminator_loss(self.D_X, x, self.fake_x, use_lsgan=True)

    # summary
    tf.summary.histogram('D_Y/true', self.D_Y(y))
    tf.summary.histogram('D_Y/fake', self.D_Y(self.G(x)))
    tf.summary.histogram('D_X/true', self.D_X(x))
    tf.summary.histogram('D_X/fake', self.D_X(self.F(y)))
    tf.summary.scalar('loss/G', G_gan_loss)
    tf.summary.scalar('loss/D_Y', D_Y_loss)
    tf.summary.scalar('loss/F', F_gan_loss)
    tf.summary.scalar('loss/D_X', D_X_loss)
    tf.summary.scalar('loss/cycle', cycle_loss)

    return G_loss, D_Y_loss, F_loss, D_X_loss, fake_y, fake_x
def get_reader(username=None, password=None, use_cookie_file=False):
    reader = Reader(Opener())
    if 'cookies' in session:
        print "Loading cookies"
        reader.opener.load_cookies(session['cookies'])
    elif use_cookie_file:
        print "Loading cookies from file"
        with open(tmp_dir + "cookies.txt", "r") as text_file:
            cookies = text_file.read()
        print cookies
        reader.opener.load_cookies(cookies)
    elif username is None:
        print "Cannot login, no username provided"
        return (None, "No username provided")
    else:
        print "Logging in as ", username
        reader.init()
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        with open(tmp_dir + "cookies.txt", "wb") as text_file:
            text_file.write(reader.opener.get_cookies())
        result = reader.login(username, password)
        if "The userID or password could not be validated" in result:
            print "Bad User ID or password"
            return (None, "Bad User ID or password")
        if "Concurrent Login Error" in result:
            print "User already logged in"
            return (None, "User already logged in")
    print "Logged in"
    return (reader, "")
def get_group(self, group_key_name):
    group = Group.get_by_key_name(group_key_name)
    if group is None:
        self.response.write('No group by that name exists.')
    else:
        # Check for new posts
        for group_feed in group.group_feeds:
            reader = Reader.create(group_feed.feed)
            reader.refresh()
        template_values = {
            'group_key_name': group_key_name,
            'group': group,
            'user': users.get_current_user()
        }
        template = JINJA_ENVIRONMENT.get_template('templates/group.html')
        self.response.write(template.render(template_values))
def __init__(self, p=0.01175, n=1):
    # Init with optimal prune value and no grams as default
    self.number_of_reviews = None          # Total number of analyzed reviews
    self.total_pos_words = 0               # Total positive words or n-grams
    self.total_neg_words = 0               # Total negative words or n-grams
    self.directory = ''                    # Directory (alle or subset)
    self.pos_freq = defaultdict(int)       # Frequency of words or n-grams
    self.neg_freq = defaultdict(int)       # Frequency of words or n-grams
    self.most_common_pos = []              # 25 most common words or n-grams
    self.most_common_neg = []              # 25 most common words or n-grams
    self.pos_popularity = {}               # Popularity values
    self.neg_popularity = {}               # Popularity values
    self.pos_highest_pop = []
    self.neg_highest_pop = []
    self.pos_information_value = {}        # Informational values
    self.neg_information_value = {}        # Informational values
    self.pos_highest_inform_val = []       # 25 highest informational value
    self.neg_highest_inform_val = []       # 25 highest informational value
    self.pos_all_words_raw = []            # Made in find_frequency (all docs as lists)
    self.neg_all_words_raw = []            # Made in find_frequency (all docs as lists)
    self.pos_doc_count = defaultdict(int)  # Made in prune (in how many docs the word appears)
    self.neg_doc_count = defaultdict(int)  # Made in prune (in how many docs the word appears)

    self.set_directory()  # Prompt
    r = Reader()
    if n > 1:
        self.pos_all_n_grams_raw = []
        self.neg_all_n_grams_raw = []
        self.make_n_grams_and_find_frequency(r, n)
        self.prune(p, self.pos_all_n_grams_raw, self.neg_all_n_grams_raw)
    else:
        self.find_frequency(r)
        self.prune(p, self.pos_all_words_raw, self.neg_all_words_raw)
    self.find_most_common()
    self.find_popularity()
    self.find_highest_pop_value()
    self.find_information_value()
    self.find_highest_informational_value()
def main(): """Shows basic usage of the Sheets API. Prints values from a sample spreadsheet. """ env = read_env() print('start main') lang_list = get_range(env['lang']) sheet = sheets_api(credential(env['credential_file_path'])) result = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID, range=SAMPLE_RANGE_NAME).execute() print(result) reader = Reader(result) # print(reader.get_column_info()) # print(reader.get_lang_by_column('ko')) '''
def test():
    input_tensor = Input((40, 40, 1))
    predicted = LeNetModel(input_tensor)
    model = Model(inputs=input_tensor, outputs=predicted)
    model.load_weights('/home/ld/remote_project/CV_HW1/feature/LeNet/checkpoint-10-2.15.hdf5')
    reader = Reader('/home/ld/dataset/affNIST/training_and_validation_batches',
                    '/home/ld/dataset/affNIST/test.mat')
    model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
    score = model.evaluate(np.expand_dims(reader.test_images, axis=3),
                           keras.utils.to_categorical(reader.test_labels, 10),
                           verbose=0)
    print('Total Test Accuracy is ', score[1])
    predict_res = model.predict(np.expand_dims(reader.test_images, axis=3))
    calculate_acc_error(np.argmax(predict_res, axis=1), reader.test_labels)
def onkeypress(event):
    print totitle(self.n), event.key
    self.data[self.n] = event.key
    if self.default_filename:
        self.save(self.default_filename)
    self.n = self.g.next()
    print "Loading: " + totitle(self.n)
    movie = Reader(self.n, adjuststipple=True)
    print "Showing"
    clf()
    imshow(movie[self.iframe], interpolation='nearest')
    self.drawrefs()
    grid(True)
    axis('image')
    title(totitle(self.n))
    draw()
def load_reader(self, chat):
    cid = str(chat.id)
    reader = self.get_reader(cid)
    if reader is not None:
        return reader
    reader = self.get_reader_file(cid)
    if not reader:
        reader = Reader.FromChat(chat, self.min_period, self.max_period, self.logger)
    old_reader = self.memory.add(reader)
    if old_reader is not None:
        old_reader.commit_memory()
        self.store(old_reader)
    return reader
def main():
    input_csv_file_name = sys.argv[1]
    output_csv_file_name = sys.argv[2]

    # input values are in the form of [feature_1, feature_2, label]
    input_values = Reader.csv(input_csv_file_name)

    # Track previous weights and compare against the latest weights to check convergence
    previous_weights = [0, 0, 0]
    weights = None

    Reporter.write_output(file_name=output_csv_file_name, content="", should_overwrite_file=True)

    training_inputs = [[x[0], x[1]] for x in input_values]
    results = [x[2] for x in input_values]

    iterations = 0
    while previous_weights != weights:
        # Past the initial condition, we want to track the previous weights
        if weights is not None:
            # update previous weights so we can remember them for comparison
            previous_weights = weights
        # weights will be a list in the form of [b or w_0, w_1, w_2]
        weights = PerceptronLearning.run(training_inputs=training_inputs,
                                         results=results,
                                         initial_weights=previous_weights,
                                         iterations=1)

        # write lines to output file
        Reporter.write_output(
            file_name=output_csv_file_name,
            content=','.join(map(str, [weights[1], weights[2], weights[0]])) + "\n",
        )

        # create png images of the figures
        Visualizer.draw_chart(input_values=input_values, weights=weights,
                              file_name="figures/figure_" + str(iterations))
        iterations += 1
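# PerceptronLearning.run is not shown. A minimal sketch of one pass of the classic
# perceptron update rule it appears to perform, assuming weights ordered as
# [bias, w_1, w_2] and labels in {-1, +1}; this is an illustration, not the
# original class.
def perceptron_single_pass(training_inputs, results, weights):
    b, w1, w2 = weights
    for (x1, x2), label in zip(training_inputs, results):
        prediction = 1 if b + w1 * x1 + w2 * x2 > 0 else -1
        if prediction != label:
            # shift the decision boundary toward the misclassified point
            b, w1, w2 = b + label, w1 + label * x1, w2 + label * x2
    return [b, w1, w2]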
def __init__(self, models_dir, fold_name, writer=None, hyper=None):
    self._graph = tf.Graph()
    with self._graph.as_default():
        reader = Reader(fold_name)
        self.fold_size = reader.fold_size
        with tf.device('/gpu:1'):
            self._input = reader.inputs(Tester.BATCH_SIZE, is_train=False)
            self._network = Network(self._input['images'], is_train=False, hyper=hyper)
            self._probs = self._network.probs()
            self._cross_entropy_losses = self._network.cross_entropy_losses(self._input['labels'])
        self._all_summaries = tf.merge_all_summaries()

    self.models_dir = models_dir
    print('Tester model folder: %s' % self.models_dir)
    assert os.path.exists(self.models_dir)
    self.writer = writer
def train():
    batch_size = 32
    train_total = 1920000
    input_tensor = Input((224, 224, 1), dtype=tf.float32)
    predicted = VGG16Model(input_tensor)
    model = Model(inputs=input_tensor, outputs=predicted)
    model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
    checkpoint = ModelCheckpoint(filepath='/home/ld/remote_project/CV_HW1/feature/VGG16/checkpoint-{epoch:02d}.hdf5')
    reader = Reader(
        '/home/ld/dataset/affNIST/training_and_validation_batches',
        '/home/ld/dataset/affNIST/test.mat',
        batch_size=batch_size,
        resize=[224, 224]
    )
    callbacks = [CustomModelCheckpoint(model, '/home/ld/remote_project/CV_HW1/feature/VGG16/checkpoint-{epoch:02d}.hdf5')]
    model.fit_generator(reader.train_generator,
                        epochs=5,
                        steps_per_epoch=int(train_total / batch_size + 1),
                        callbacks=callbacks)
def test_3_pull_epochindices_chosenregion(self):
    os.chdir(rootwd)
    for key in self.file_stim:
        reader_io_stim = Reader(self.file_stim[key])
    os.chdir(pwd)
    epoch_indices_soma = reader_io_stim.pull_epochindices_chosenregion('soma')
    epoch_indices_axon = reader_io_stim.pull_epochindices_chosenregion('axon')
    # print epoch_indices_soma
    # print epoch_indices_axon
    self.assertNotEqual(epoch_indices_soma, epoch_indices_axon)
    reader_io_stim.closefile()
def assign_unused_call(init_solution, data: Reader, feasible):
    """ Assign the most expensive dummy call to a random vehicle. """
    # find the most expensive unused call
    solution = init_solution.copy()
    call_dict = data.getCallsDict()
    record_cost = 0
    record_call = None
    unused_calls = __get_dummy_calls(solution)
    for call in unused_calls:
        origin, dest, s, fail_cost, lp, up, ld, ud = call_dict[call]
        if fail_cost > record_cost:
            record_call = call
            record_cost = fail_cost
    call = record_call
    if call is None:
        return solution

    # remove the call from the dummy vehicle
    assert call != 0
    assert call in solution
    solution.remove(call)
    solution.remove(call)

    car_number = math.ceil(random.random() * data.num_cars)
    start, stop = __get_car_index(car_number, solution, data.num_cars)
    if stop - start < brute_force_limit:
        new_sol = __insert_call_brute_force(solution, call, car_number, data, feasible)
        if new_sol is not None:
            return new_sol
        return init_solution
    else:
        # add randomly to the new car
        if start == stop + 1:
            # empty car
            solution.insert(start, call)
            solution.insert(start, call)
        else:
            t1 = random.randint(start, stop)
            t2 = random.randint(start, stop)
            solution.insert(t1, call)
            solution.insert(t2, call)
        assert len(solution) == len(init_solution)
        return solution