def run(**kwargs):
    params = get_params(kwargs)
    configure_logging(params)
    if only_instance_running(params):
        logging.info('Starting execution.')
        write_pid_file(params)  # create lock to avoid concurrent executions
        current_exec_time = utcnow()
        last_exec_time = replace_exec_time(current_exec_time, params['history_path'])
        if 'config' in params:
            config = params['config']
        else:
            config = load_config(params['config_path'])
        config['current_exec_time'] = current_exec_time
        config['last_exec_time'] = last_exec_time
        config['query_folder'] = params['query_folder']
        config['output_folder'] = params['output_folder']
        config['wikis_path'] = params['wikis_path']
        reader = Reader(config)
        selector = Selector(reader, config)
        executor = Executor(selector, config)
        writer = Writer(executor, config)
        writer.run()
        delete_pid_file(params)  # free lock for other instances to execute
        logging.info('Execution complete.')
    else:
        logging.warning('Another instance is already running. Exiting.')

def run(**kwargs):
    params = get_params(kwargs)
    configure_logging(params)
    if only_instance_running(params):
        logging.info('Starting execution.')
        write_pid_file(params)  # create lock to avoid concurrent executions
        current_exec_time = utcnow()
        config = load_config(params['config_path'])
        config['current_exec_time'] = current_exec_time
        config['query_folder'] = params['query_folder']
        config['output_folder'] = params['output_folder']
        config['reruns'], rerun_files = read_reruns(params['query_folder'])
        reader = Reader(config)
        selector = Selector(reader, config)
        executor = Executor(selector, config)
        writer = Writer(executor, config, configure_graphite(config))
        writer.run()
        delete_reruns(rerun_files)  # delete rerun files that have been processed
        delete_pid_file(params)  # free lock for other instances to execute
        logging.info('Execution complete.')
    else:
        logging.warning('Another instance is already running. Exiting.')

def dual(use_spi=False, soft=True):
    ssd0 = setup(False, soft)  # I2C display
    ssd1 = setup(True, False)  # SPI instance
    Writer.set_textpos(ssd0, 0, 0)  # In case previous tests have altered it
    wri0 = Writer(ssd0, small, verbose=False)
    wri0.set_clip(False, False, False)
    Writer.set_textpos(ssd1, 0, 0)  # In case previous tests have altered it
    wri1 = Writer(ssd1, small, verbose=False)
    wri1.set_clip(False, False, False)
    nfields = []
    dy = small.height() + 6
    col = 15
    for n, wri in enumerate((wri0, wri1)):
        nfields.append([])
        y = 2
        for txt in ('X:', 'Y:', 'Z:'):
            Label(wri, y, 0, txt)
            nfields[n].append(Label(wri, y, col, wri.stringlen('99.99'), True))
            y += dy
    for _ in range(10):
        for n, wri in enumerate((wri0, wri1)):
            for field in nfields[n]:
                value = int.from_bytes(uos.urandom(3), 'little') / 167772
                field.value('{:5.2f}'.format(value))
            wri.device.show()
        utime.sleep(1)
    for wri in (wri0, wri1):
        Label(wri, 0, 64, ' DONE ', True)
        wri.device.show()

def test():
    log = lambda message: sys.stdout.write(message)
    writer = Writer('D:/exe/imod/IMOD_USER/pusair-output')
    try:
        log('Reading config... ')
        config.parse()
        if config.config['gradient'] > 1:
            raise ValueError('Maximum gradient is 1')
        log('Done\n')
        p, adj = slurp.prep(
            # fbore=str('D:/exe/imod/IMOD_USER/pusair-input/Boreholes_Dimas.ipf'),
            fbore=str('D:/exe/imod/IMOD_USER/pusair-input/Boreholes_Jakarta.ipf'),
            fscreen=str('data/well_M_z_all.ipf'),
            config=config,
            log=log)
        interpolator = Interpolator(p, adj, writer, log)
        interpolator.interpolate()
        log('\n[DONE]')
    except Exception as e:
        log('\n\n[ERROR] {}'.format(e))
        traceback.print_exc()
        writer.reset()

def do_build(args):
    # load config
    config_file = os.path.join(args.src_dir, "_config.py")
    try:
        config.init(config_file)
    except config.ConfigNotFoundException:
        print >>sys.stderr, ("No configuration found: %s" % config_file)
        parser.exit(1, "Want to make a new site? Try `blogofile init`\n")
    logger.info("Running user's pre_build() function..")
    writer = Writer(output_dir="_site")
    if config.blog_enabled:
        config.pre_build()
        posts = post.parse_posts("_posts")
        if args.include_drafts:
            drafts = post.parse_posts("_drafts", config)
            for p in drafts:
                p.draft = True
        else:
            drafts = None
        writer.write_blog(posts, drafts)
    else:
        # Build the site without a blog
        writer.write_site()
    logger.info("Running user's post_build() function..")
    config.post_build()

def handle_standard_io(parser):
    parser.read_file(sys.stdin)
    #parser.print_file() # what is this for?
    parser.build_structure()  # build doc from actual file
    writer = Writer()
    writer.write(parser.node_file)
    for line in writer.buffer:
        print line.rstrip()

def do_build(args, load_config=True):
    if load_config:
        config_init(args)
    writer = Writer(output_dir=util.path_join("_site", util.fs_site_path_helper()))
    logger.debug("Running user's pre_build() function..")
    config.pre_build()
    writer.write_site()
    logger.debug("Running user's post_build() function..")
    config.post_build()

def render_html(name, source, translator_class=None):
    writer = Writer()
    if translator_class is not None:
        writer.translator_class = translator_class
    settings = {"stylesheet_path": "/static/html4css1.css,/static/main.css",
                "embed_stylesheet": False,
                "file_insertion_enabled": False,
                "raw_enabled": False,
                "xml_declaration": False}
    return publish_string(source, writer_name="html", writer=writer,
                          settings_overrides=settings).decode()

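# A hedged usage sketch for render_html() above. It assumes the Writer here
# is docutils.writers.html4css1.Writer and publish_string comes from
# docutils.core (consistent with the html4css1 stylesheet settings used);
# the reST source below is illustrative only.
if __name__ == '__main__':
    html = render_html('demo', 'Title\n=====\n\nSome *emphasized* text.')
    print('<em>emphasized</em>' in html)  # expected: True
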
def do_build(args):
    # load config
    try:
        # Always load the _config.py from the current directory.
        # We already changed to the directory specified with --src-dir
        config.init("_config.py")
    except config.ConfigNotFoundException:
        print >>sys.stderr, ("No configuration found in source dir: %s" % args.src_dir)
        parser.exit(1, "Want to make a new site? Try `blogofile init`\n")
    writer = Writer(output_dir="_site")
    logger.debug("Running user's pre_build() function..")
    config.pre_build()
    writer.write_site()
    logger.debug("Running user's post_build() function..")
    config.post_build()

def test(use_spi=False):
    ssd = setup(use_spi)  # Create a display instance
    rhs = WIDTH - 1
    ssd.line(rhs - 20, 0, rhs, 20, 1)
    square_side = 10
    ssd.fill_rect(rhs - square_side, 0, square_side, square_side, 1)
    wri = Writer(ssd, freesans20)  # verbose = False to suppress console output
    Writer.set_textpos(ssd, 0, 0)
    wri.printstring('Sunday\n')
    wri.printstring('12 Aug 2018\n')
    wri.printstring('10.30am')
    ssd.show()

def __init__(self, module):
    self.module = module
    self.buf = Writer()  # module level
    self.function_buf = Writer()  # functions are emitted separately
    self.label_count = 0
    self.jump_table = {}  # offsets of the functions

def main(): print "Welcome to draugiem.lv message downloader " sys.stdout.write("Email: ") sys.stdout.flush() email = raw_input() password = getpass.getpass() try: downloader = MessageDownloader(email, password) except DraugiemException: print "invalid username/password" sys.exit(1) def progress_show(current, total): sys.stdout.write("%3d of %d\r" % (current, total)) sys.stdout.flush() print "[1/4] downloading inbox" downloader.get_messages(type = 'in', progress_callback = progress_show) print "[2/4] downloading outbox" downloader.get_messages(type = 'out', progress_callback = progress_show) print "[3/4] sorting messages" msgs = downloader.get_all_messages() sys.stdout.write("Enter path: ") sys.stdout.flush() path = raw_input() if not os.path.exists(path): os.mkdir(path) print "[4/4] writing" for user in msgs: w = Writer(os.path.join(path, "%s.html" % (downloader.get_user_info(user).replace("/", "")))) w.start(downloader.users[user]) for item in msgs[user]: w.write(item) w.end()
def inverse(use_spi=False, soft=True):
    ssd = setup(use_spi, soft)  # Create a display instance
    rhs = WIDTH - 1
    ssd.line(rhs - 20, 0, rhs, 20, 1)
    square_side = 10
    ssd.fill_rect(rhs - square_side, 0, square_side, square_side, 1)
    Writer.set_textpos(ssd, 0, 0)  # In case previous tests have altered it
    wri = Writer(ssd, freesans20, verbose=False)
    wri.set_clip(False, False, False)  # Char wrap
    wri.printstring('Sunday\n')
    wri.printstring('12 Aug 2018\n')
    wri.printstring('10.30am', True)  # Inverse text
    ssd.show()

def wrap(use_spi=False, soft=True):
    ssd = setup(use_spi, soft)  # Create a display instance
    Writer.set_textpos(ssd, 0, 0)  # In case previous tests have altered it
    wri = Writer(ssd, freesans20, verbose=False)
    wri.set_clip(False, False, True)  # Word wrap
    wri.printstring('the quick brown fox jumps over')
    ssd.show()

def main(argv):
    try:
        # long options taking a value need a trailing '=' in getopt
        opts, args = getopt.getopt(argv, "i:o:", ["input=", "output="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ("-i", "--input"):
            inputFile = a
        elif o in ("-o", "--output"):
            outputFile = a
    parser = Parser(inputFile)
    writer = Writer()
    writer.fromParser(parser)
    # TODO implement dumping to stdout
    writer.dumpToFile(outputFile)

def test_word_count(self):
    file_name = 'data/01-train-input.txt'
    reader = Reader(file_name)
    reader.read_file()
    unigram = TrainUnigram(reader.word_list, reader.total_word_count)
    unigram.train_unigram()
    file_name = 'data/unigram_model'
    write_model = Writer(file_name, unigram.word_dict)
    write_model.write_file()
    model = Model(file_name)
    model.read_model()
    file_name = 'data/01-test-input.txt'
    test = Evaluater(file_name, model.word_dict)
    test.evaluate_model()
    print('entropy is ' + str(test.H / test.total_word_count))
    print('coverage is ' + str(1.0 * (test.total_word_count - test.unknown_word_count) / test.total_word_count))

def test_word_count(self):
    file_name = 'data/02-train-input.txt'
    reader = Reader(file_name)
    reader.read_file()
    ngram = TrainNgram(reader.word_list, 3)
    ngram.train()
    print({k: v for k, v in ngram.word_dict.items()})
    print({k: v for k, v in ngram.lambda_word_dict.items()})
    file_name = 'data/ngram_model'
    writer_model = Writer(file_name, ngram.word_dict)
    writer_model.write_file()
    model = ReadNgram(file_name)
    model.read_model()
    file_name = 'data/02-train-input.txt'
    test = EvaluateModel(file_name, model.word_dict, ngram.lambda_word_dict)
    test.evaluate()
    print('entropy is ' + str(test.H / test.total_word_count))

def main(args):
    stats = Stats()
    transactions = TransactionsList(args.infile)
    if args.algorithm == 'apriori':
        algorithm = Apriori(transactions, args.minsup)
    else:
        algorithm = Dic(transactions, args.minsup, args.m)
    large_sets, counter = algorithm.get_large_sets_and_counter()
    stats.record_post_large_sets()
    rules = RulesGenerator.generate_rules(large_sets, args.minconf, counter, transactions)
    stats.record_post_rules()
    writer = Writer(args.outfile)
    writer.add_args(args)
    writer.add_stats(stats)
    writer.add_rules(rules)
    writer.write()

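# A hypothetical argparse front end for main() above. The attribute names
# (infile, outfile, algorithm, minsup, minconf, m) mirror the usage in
# main(); the flag spellings, types and defaults are assumptions.
import argparse

def parse_args(argv=None):
    p = argparse.ArgumentParser(description='Mine association rules')
    p.add_argument('infile', help='transactions input file')
    p.add_argument('outfile', help='file the Writer reports args, stats and rules to')
    p.add_argument('--algorithm', choices=('apriori', 'dic'), default='apriori')
    p.add_argument('--minsup', type=float, default=0.1, help='minimum support')
    p.add_argument('--minconf', type=float, default=0.5, help='minimum confidence')
    p.add_argument('-m', type=int, default=100, help='DIC interval size (dic only)')
    return p.parse_args(argv)

# main(parse_args())
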
def test_word_count(self):
    fileName = 'data/02-train-input.txt'
    reader = Reader(fileName)
    reader.file_Read()
    bigram = TrainBigram(reader.word_list)
    bigram.train()
    print({k: v for k, v in bigram.word_dict.items()})
    print({k: v for k, v in bigram.lambda_word_dict.items()})
    fileName = 'data/bigram_model'
    writemodel = Writer(fileName, bigram.word_dict)
    writemodel.write_file()
    model = ModelReader(fileName)
    model.read_model()
    fileName = 'data/02-train-input.txt'
    test = EvaluateModel(fileName, model.word_dict, bigram.lambda_word_dict)
    test.evaluate_model()
    print('entropy is ' + str(test.H / test.total_word_count))

def handle_files(parser, files, dir_source, dir_dest):
    for filename in files:
        print '--------', filename, '--------'
        # TODO: handle exceptions on file
        file = open(filename, 'r')
        node_file = parser.read_file(file)
        file.close()
        #parser.print_file() # what is this for?
        parser.build_structure(node_file)  # build doc from actual file
        writer = Writer()
        writer.write(node_file)
        filepath = fs.transform_filepath(filename, dir_source, dir_dest)
        print 'path', filename, dir_source, dir_dest, filepath
        # TODO: handle exceptions on file
        file = open(filepath, 'w')
        file.write(''.join(writer.buffer))
        file.close()

def test_word_count(self):
    file_name = 'data/01-train-input.txt'
    reader = Reader(file_name)
    reader.read_file()
    unigram = TrainUnigram(reader.word_list, reader.total_word_count)
    unigram.train()
    file_name = 'data/unigram_model'
    writemodel = Writer(file_name, unigram.word_dict)
    writemodel.write_file()
    file_name = 'data/03-train-input.txt'
    cfeature = FeatureReader(file_name)
    cfeature.read_feature()
    print({k: v for k, v in cfeature.feature_dict.items()})
    online = OnlineLearning(cfeature.feature_dict, unigram.word_dict, 'UNI:')
    online.online_learning()
    print({k: v for k, v in online.phi.items()})
    prediction = OnePrediction('data/03-train.txt', online.weight, online.phi, 'UNI:')
    prediction.predict()

def handle_buffer(buffer):
    markup_input = markup.find_markup('rst', 'python')
    markup_output = markup.find_markup('rst', 'python')
    #print buffer
    if not markup_output:
        markup_output = markup_input
    # TODO pass markup directly instead of assigning to Node
    Node.writer = markup_output.writer
    Node.reader = markup_input.reader
    Node.lang = markup_input.lang
    parser = markup_input.parser
    node = parser.read_file(buffer)
    #parser.print_file() # what is this for?
    parser.build_structure(node)  # build doc from actual file
    writer = Writer()
    writer.write(node)
    return writer.buffer

def __init__(self, utils):
    """
    Constructor

    @param utils: A module containing a lot of utility-functions as well
           as data which is needed almost everywhere.
    @type utils: L{SpecDebugUtils}
    """
    #: A module containing a lot of utility-functions.
    #: @type: L{SpecDebugUtils}
    self.__utils = utils

    #: A utility class that allows us to print the countertrace.
    #: @type: L{Writer}
    self.__writer = Writer(utils)

def parse(self, filepath):
    """ Parse """
    writer = Writer(filepath, self)
    self.writer = writer
    while self.lookAhead() is not None:
        node = self.parseProgram()
        if node is not None:
            node.run(writer)
    # write the end-of-script marker
    writer.writeString(Code.CODE_END)
    writer.writeCrlf()
    # clean-up
    writer.finalize()
    print "convert done."

def __init__(self, utils, counterstrategy, z_array, countertrace):
    """
    Constructor

    @param utils: A module containing a lot of utility-functions as well
           as data which is needed almost everywhere.
    @type utils: L{SpecDebugUtils}
    @param counterstrategy: A counterstrategy, i.e., a strategy for the
           environment to find inputs so that the system is forced to
           violate the specification.
    @type counterstrategy: L{BDD}
    @param z_array: z_array[a] contains the intermediate results of the
           fixpoint computation in the variable Z of the computation of
           the winning region for the environment. 'a' counts the
           iterations of the fixpoint computation in Z. It is used to
           figure out how often the value of jx might still change in the
           future of the play. (If the play is in z_array[a], jx might
           change at most a-1 times.)
    @type z_array: list<L{BDD}>
    @param countertrace: A sequence of inputs so that the system is
           forced to violate its specification.
    @type countertrace: L{InfiniteTraceOfBdds}
    """
    #: A module containing a lot of utility-functions.
    #: @type: L{SpecDebugUtils}
    self.__utils = utils

    #: A winning strategy for the environment.
    #: @type: L{BDD}
    self.__counterstrategy = counterstrategy

    #: Intermediate results of the fixpoint computation in the variable Z.
    #: @type: list<L{BDD}>
    self.__z_array = z_array

    #: A sequence of inputs so that the system is forced to violate its
    #: specification.
    #: @type: L{InfiniteTraceOfBdds}
    self.__countertrace = countertrace

    #: A utility class that allows us to print the summary of the play.
    #: @type: L{Writer}
    self.__writer = Writer(utils)

def __init__(self, **kwargs):
    """Initialize the keycode bindings and all controllers, create output dir."""
    settings = {
        'experimentName': 'someNameHere',
        'outputRootDir': os.path.normpath(os.path.expanduser("~/kauferdata")),
        'keyBindings': {
            'quit': 'q',
            'toggleStimulator': 't',
            'triggerStimulator': 's'
        },
        'audio': {},
        'gui': {},
        'stimulator': {
            'activeProtocolName': 'nucleusAccumbensExample'
        },
        'tracker': {},
        'videoIn': {},
        'videoOut': {},
        'writer': {},
    }
    settings.update(kwargs)
    trialDir = "{0}_{1}_{2}".format(time.strftime("%y%m%d%H%M%S"),
                                    settings['experimentName'],
                                    settings['stimulator']['activeProtocolName'])
    settings['outputDataDir'] = os.path.join(settings['outputRootDir'], trialDir)
    self.keyBindings = settings['keyBindings']
    self.keycodeBindings = {k: getattr(opencvgui.keycodes, v)
                            for k, v in self.keyBindings.items()}
    os.makedirs(os.path.join(settings['outputDataDir'], 'audio'))
    self.writeExperimentData(settings)
    for x in ['audio', 'videoOut', 'writer']:
        # ensure each controller config exists and knows the output dir
        kwargs.setdefault(x, {})['outputDataDir'] = settings['outputDataDir']
    # gui and writer both need a reference to the app instance because they
    # draw on the state of so many other controllers
    self.audio = Audio(**settings['audio'])
    self.gui = Gui(self, **settings['gui'])
    self.stimulator = Stimulator(**settings['stimulator'])
    self.tracker = Tracker(**settings['tracker'])
    self.videoIn = VideoIn(**settings['videoIn'])
    self.videoOut = VideoOut(**settings['videoOut'])
    self.writer = Writer(self, **settings['writer'])

def main():
    my_runner = Runner(Getter(), Writer(), Deleter(), Copier())
    my_runner.run_it()

class Abstractor(object):
    """High level model that handles initializing the underlying network
    architecture, saving, updating examples, and predicting examples.
    """

    # --------------------------------------------------------------------------
    # Initialization
    # --------------------------------------------------------------------------

    def __init__(self, args, src_dict, tgt_dict, state_dict=None):
        # Book-keeping.
        self.args = args
        self.word_dict = src_dict
        self.args.vocab_size = len(src_dict)
        self.tgt_dict = tgt_dict
        self.args.tgt_vocab_size = len(tgt_dict)
        self.updates = 0
        self.use_cuda = False
        self.parallel = False

        if args.model_type == 'rnn':
            self.network = Writer(args, tgt_dict)
        else:
            raise RuntimeError('Unsupported model: %s' % args.model_type)

        # Load saved state
        if state_dict:
            # Load buffer separately
            if 'fixed_embedding' in state_dict:
                fixed_embedding = state_dict.pop('fixed_embedding')
                self.network.load_state_dict(state_dict)
                self.network.register_buffer('fixed_embedding', fixed_embedding)
            else:
                self.network.load_state_dict(state_dict)

        if self.args.ema:
            self.ema = ExponentialMovingAverage(0.999)
            for name, param in self.network.named_parameters():
                if param.requires_grad:
                    self.ema.register(name, param.data)

    def expand_dictionary(self, words):
        # TODO: at present, only supported for source dictionary
        """Add words to the Reader dictionary if they do not exist. The
        underlying embedding matrix is also expanded (with random embeddings).

        Args:
            words: iterable of tokens to add to the dictionary.
        Output:
            added: set of tokens that were added.
        """
        to_add = {self.word_dict.normalize(w) for w in words
                  if w not in self.word_dict}

        # Add words to dictionary and expand embedding layer
        if len(to_add) > 0:
            logger.info('Adding %d new words to dictionary...' % len(to_add))
            for w in to_add:
                self.word_dict.add(w)
            self.args.vocab_size = len(self.word_dict)
            logger.info('New vocab size: %d' % len(self.word_dict))

            old_embedding = self.network.encoder.word_embeddings.embedding.weight.data
            self.network.encoder.word_embeddings.embedding = torch.nn.Embedding(
                self.args.vocab_size, self.args.emsize, padding_idx=0)
            new_embedding = self.network.encoder.word_embeddings.embedding.weight.data
            new_embedding[:old_embedding.size(0)] = old_embedding

        # Return added words
        return to_add

    @staticmethod
    def load_embeddings(word_dict, words, embedding_file, emb_layer, fix_embeddings):
        """Load pretrained embeddings for a given list of words, if they exist.
        #TODO: update args

        Args:
            words: iterable of tokens. Only those that are indexed in the
                dictionary are kept.
            embedding_file: path to text file of embeddings, space separated.
        """
        words = {w for w in words if w in word_dict}
        logger.info('Loading pre-trained embeddings for %d words from %s' %
                    (len(words), embedding_file))

        # When normalized, some words are duplicated. (Average the embeddings).
        vec_counts, embedding = {}, {}
        with open(embedding_file) as f:
            for line in f:
                parsed = line.rstrip().split(' ')
                assert len(parsed) == emb_layer.word_vec_size + 1
                w = word_dict.normalize(parsed[0])
                if w in words:
                    vec = torch.Tensor([float(i) for i in parsed[1:]])
                    if w not in vec_counts:
                        vec_counts[w] = 1
                        embedding[w] = vec
                    else:
                        logging.warning('WARN: Duplicate embedding found for %s' % w)
                        vec_counts[w] = vec_counts[w] + 1
                        embedding[w].add_(vec)

        for w, c in vec_counts.items():
            embedding[w].div_(c)

        emb_layer.init_word_vectors(word_dict, embedding, fix_embeddings)
        logger.info('Loaded %d embeddings (%.2f%%)' %
                    (len(vec_counts), 100 * len(vec_counts) / len(words)))

    def load_src_embeddings(self, words, embedding_file):
        """Load pretrained embeddings for a given list of words, if they exist.

        Args:
            words: iterable of tokens. Only those that are indexed in the
                dictionary are kept.
            embedding_file: path to text file of embeddings, space separated.
        """
        self.load_embeddings(self.word_dict, words, embedding_file,
                             self.network.encoder.word_embeddings.embedding,
                             self.args.fix_embeddings)

    def load_tgt_embeddings(self, words, embedding_file):
        """Load pretrained embeddings for a given list of words, if they exist.

        Args:
            words: iterable of tokens. Only those that are indexed in the
                dictionary are kept.
            embedding_file: path to text file of embeddings, space separated.
        """
        fix_embeddings = (not self.args.share_decoder_embeddings and
                          self.args.fix_embeddings)
        self.load_embeddings(self.tgt_dict, words, embedding_file,
                             self.network.word_embeddings.embedding,
                             fix_embeddings)

    def init_optimizer(self, state_dict=None, use_gpu=True):
        """Initialize an optimizer for the free parameters of the network.

        Args:
            state_dict: optimizer's state dict
            use_gpu: required to move state_dict to GPU
        """
        if self.args.fix_embeddings:
            for p in self.network.encoder.word_embeddings.embedding.parameters():
                p.requires_grad = False
        if not self.args.share_decoder_embeddings and self.args.fix_embeddings:
            for p in self.network.word_embeddings.embedding.parameters():
                p.requires_grad = False

        parameters = [p for p in self.network.parameters() if p.requires_grad]
        if self.args.optimizer == 'sgd':
            self.optimizer = optim.SGD(parameters, self.args.learning_rate,
                                       momentum=self.args.momentum,
                                       weight_decay=self.args.weight_decay)
        elif self.args.optimizer == 'adamax':
            self.optimizer = optim.Adamax(parameters,
                                          weight_decay=self.args.weight_decay)
        else:
            raise RuntimeError('Unsupported optimizer: %s' % self.args.optimizer)

        if state_dict is not None:
            self.optimizer.load_state_dict(state_dict)
            # TODO: fix me [temporary: https://github.com/pytorch/pytorch/issues/2830]
            if use_gpu:
                for state in self.optimizer.state.values():
                    for k, v in state.items():
                        if isinstance(v, torch.Tensor):
                            state[k] = v.cuda()

    # --------------------------------------------------------------------------
    # Learning
    # --------------------------------------------------------------------------

    def _make_src_map(self, data):
        """ ? """
        src_size = max([t.size(0) for t in data])
        src_vocab_size = max([t.max() for t in data]) + 1
        alignment = torch.zeros(len(data), src_size, src_vocab_size)
        for i, sent in enumerate(data):
            for j, t in enumerate(sent):
                alignment[i, j, t] = 1
        return alignment

    def _align(self, data):
        """ ? """
        tgt_size = max([t.size(0) for t in data])
        alignment = torch.zeros(len(data), tgt_size).long()
        for i, sent in enumerate(data):
            alignment[i, :sent.size(0)] = sent
        return alignment

    def update(self, ex):
        """Forward a batch of examples; step the optimizer to update weights."""
        if not self.optimizer:
            raise RuntimeError('No optimizer set.')

        # Train mode
        self.network.train()

        # To enable copy attn, collect source map and alignment info
        if self.args.copy_attn:
            assert 'src_map' in ex and 'alignment' in ex
            source_map = self._make_src_map(ex['src_map'])
            alignment = self._align(ex['alignment'])
            source_map = Variable(source_map.cuda(async=True)) if self.use_cuda \
                else Variable(source_map)
            alignment = Variable(alignment.cuda(async=True)) if self.use_cuda \
                else Variable(alignment)
        else:

    pcs = machine.Pin('X2', machine.Pin.OUT_PP)
    prst = machine.Pin('X3', machine.Pin.OUT_PP)
    spi = machine.SPI(1)
    display = SSD1306_SPI(WIDTH, HEIGHT, spi, pdc, prst, pcs)
else:  # I2C
    # Pyb   SSD
    # 3v3   Vin
    # Gnd   Gnd
    # Y9    CLK
    # Y10   DATA
    HEIGHT = 64
    pscl = machine.Pin('Y9', machine.Pin.OUT_PP)
    psda = machine.Pin('Y10', machine.Pin.OUT_PP)
    i2c = machine.I2C(scl=pscl, sda=psda)
    display = SSD1306_I2C(WIDTH, HEIGHT, i2c)

serif = Writer(display, freeserif)
sans = Writer(display, freesans20)
Writer.set_clip(True, True)  # Disable auto scrolling and wrapping.
serif.printstring('Tuesday\n')
sans.printstring('8 Nov 2016\n')
sans.printstring('10.30am')
display.show()

def scroll_test(x, y):
    t = utime.ticks_us()
    display.scroll(x, y)  # 125ms
    print(utime.ticks_diff(utime.ticks_us(), t))
    display.show()

def _make_writer(self):
    template = '<html><head></head><body>{}</body></html>'
    return Writer(template)

logger.info('PARAMETER ...')
logger.info(opt)

logger.info('Load Dataset ...')
dataset = DataLoader(opt, opt.train_data)
dataset_size = len(dataset)
print('#training meshes = %d' % dataset_size)
logger.info('#training meshes = %d', dataset_size)

model = ClassifierModel(opt)
num_total_params = sum(p.numel() for p in model.net.parameters())
num_trainable_params = sum(p.numel() for p in model.net.parameters() if p.requires_grad)
print('Number of total parameters: %d, number of trainable parameters: %d'
      % (num_total_params, num_trainable_params))
logger.info('Number of total parameters: %d, number of trainable parameters: %d',
            num_total_params, num_trainable_params)

writer = Writer(opt)
total_steps = 0
train_start_time = time.time()
best_tst_acc = 0.0

torch.manual_seed(1)
cudnn.benchmark = False
cudnn.deterministic = True

for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    iter_data_time = time.time()
    epoch_iter = 0
    heappop_error_train = 0
    logger.info('Epoch %d started ...', epoch)
    writer.reset_counter()

        return self.name

    def _process(self, msg):
        """ Send message to measure unit and process answer

            :param msg: message to send
            :returns: parsed answer (dictionary)
        """
        ans = self.measureIface.Send(msg)
        if self.measureIface.state != self.measureIface.IF_OK:
            return {}
        res = self.measureUnit.Result(msg, ans)
        if self.writerUnit is not None and res is not None and len(res) > 0:
            self.writerUnit.WriteData(res)
        return res


if __name__ == "__main__":
    from iface import Iface
    from measureunit import MeasureUnit
    from writer import Writer

    mu = MeasureUnit('Test', 'Proba')
    iface = Iface('iface')
    wrt = Writer()
    print(mu.GetName())
    print(mu.GetType())
    print(iface.GetName())
    print(iface.GetState())
    a = Instrument("test instrument", mu, iface, wrt)

def scroll(use_spi=False, soft=True):
    ssd = setup(use_spi, soft)  # Create a display instance
    rhs = WIDTH - 1
    ssd.line(rhs - 20, 0, rhs, 20, 1)
    square_side = 10
    ssd.fill_rect(rhs - square_side, 0, square_side, square_side, 1)
    Writer.set_textpos(ssd, 0, 0)  # In case previous tests have altered it
    wri = Writer(ssd, freesans20, verbose=False)
    wri.set_clip(False, False, False)  # Char wrap
    wri.printstring('Sunday\n')
    wri.printstring('12 Aug 2018\n')
    wri.printstring('10.30am')
    for x in range(5):
        ssd.show()
        utime.sleep(2)
        wri.printstring('\nCount = {:2d}'.format(x))
    ssd.show()
    utime.sleep(2)
    wri.printstring('\nDone.')
    ssd.show()

def func_train(self):
    self.writer = Writer(self.files)

    tim.init(period=3 * 1000, mode=Timer.ONE_SHOT, callback=page6)

def page6(tim):
    oled.poweroff()

def BatteryPage(tim):
    global BattLevel
    oled.fill(0)
    icon = util.loadImage('icons/battery' + str(BattLevel) + '4.pbm')
    oled.blit(icon, 100, 0)
    oled.show()
    if BattLevel < 4:
        BattLevel += 1
        tim.init(period=1 * 1000, mode=Timer.ONE_SHOT, callback=BatteryPage)
    else:
        tim.init(period=1 * 1000, mode=Timer.ONE_SHOT, callback=page2)

i2c = machine.I2C(-1, PinScl, PinSda)
print('')
print(i2c.scan())
oled = ssd1306.SSD1306_I2C(OLED_WIDTH, OLED_HEIGHT, i2c)

wrtr = Writer(oled, font6)
Writer.set_textpos(oled, 0, 0)
wrtr.printstring("Hello World!")
oled.show()

tim = Timer(-1)
tim.init(period=3 * 1000, mode=Timer.ONE_SHOT, callback=BatteryPage)

def update_bottom_area(msg):
    Writer.set_textpos(OLED, row=44, col=0)
    WRI_BT.printstring(msg)
    OLED.show()

from machine import I2C
from machine import Pin
from ssd1306 import SSD1306_I2C
import machine
from writer import Writer
import time
import dseg_18
import rsu_text_font_14 as dseg_12

days = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']

scl = Pin(22)
sda = Pin(21)
i2c = I2C(scl=scl, sda=sda, freq=450000)
oled = SSD1306_I2C(128, 64, i2c)
dseg18 = Writer(oled, dseg_18)
dseg12 = Writer(oled, dseg_12)

rtc = machine.RTC()
rtc.ntp_sync(server="pool.ntp.org")
time.sleep(2)
while not rtc.synced():
    machine.idle()

while True:
    _tm = time.localtime()
    print(_tm)
    oled.fill(0)
    dseg12.set_textpos(5, 40)
    dseg12.printstring('{0:02d}/{1:02d}/{2} {3}'.format(_tm[2], _tm[1], _tm[0], days[_tm[6]]))
    dseg18.set_textpos(30, 20)
    dseg18.printstring('{0:02d}:{1:02d}:{2:02d}'.format(_tm[3] + 6, _tm[4], _tm[5]))
    oled.show()
    time.sleep(1)

class DvMethod():
    def __init__(self, methanalysis):
        self.method = methanalysis.get_method()
        self.metha = methanalysis
        self.name = self.method.get_name()
        self.lparams = []
        self.basic_blocks = [bb for bb in methanalysis.basic_blocks.get()]
        self.var_to_name = {}
        self.writer = None

        access = self.method.get_access_flags()
        self.access = [flag for flag in util.ACCESS_FLAGS_METHODS if flag & access]
        desc = self.method.get_descriptor()
        self.type = util.get_type(desc.split(')')[-1])
        self.params_type = util.get_params_type(desc)
        self.exceptions = methanalysis.exceptions.exceptions

        code = self.method.get_code()
        if code is None:
            util.log('No code : %s %s' % (self.name,
                                          self.method.get_class_name()), 'debug')
        else:
            start = code.registers_size - code.ins_size
            if 0x8 not in self.access:
                self.var_to_name[start] = ThisParam(start, self.name)
                self.lparams.append(start)
                start += 1
            num_param = 0
            for ptype in self.params_type:
                param = start + num_param
                self.lparams.append(param)
                self.var_to_name.setdefault(param, Param(param, ptype))
                num_param += util.get_type_size(ptype)

    def process(self):
        util.log('METHOD : %s' % self.name, 'debug')
        if 0:
            from androguard.core import bytecode
            bytecode.method2png('/tmp/graphs/%s#%s.png' % \
                (self.method.get_class_name().split('/')[-1][:-1], self.name),
                self.metha)
        graph = construct(self.basic_blocks, self.var_to_name, self.exceptions)
        self.graph = graph
        if graph is None:
            return
        if 0:
            util.create_png(self.basic_blocks, graph, '/tmp/blocks')  # 'dad_graphs/blocks'
        defs, uses = build_def_use(graph, self.lparams)
        dead_code_elimination(graph, uses, defs)
        register_propagation(graph, uses, defs)
        # After the DCE pass, some nodes may be empty, so we can simplify the
        # graph to delete these nodes.
        # We start by restructuring the graph by splitting the conditional
        # nodes into a pre-header and a header part.
        # We then simplify the graph by merging multiple statement nodes into
        # a single statement node when possible. This also deletes empty nodes.
        graph.split_if_nodes()
        graph.simplify()
        graph.reset_rpo()
        idoms = immediate_dominator(graph)
        identify_structures(graph, idoms)
        if 0:
            util.create_png(self.basic_blocks, graph, '/tmp/structured')  # 'dad_graphs/structured'
        self.writer = Writer(graph, self)
        self.writer.write_method()

    def show_source(self):
        if self.writer is not None:
            print self.writer

    def get_source(self):
        if self.writer is not None:
            return '%s' % self.writer
        return ''

    def __repr__(self):
        return 'Method %s' % self.name

def __init__(self, vg):
    self.vg = vg
    wri_big = Writer(vg.oled, pacifico_35, verbose=False)
    wri_small = Writer(vg.oled, font6, verbose=False)
    wri_fa = Writer(vg.oled, fa_30, verbose=False)
    self.lbl_status = Label(wri_small, 0, 0, 127)
    self.cur_scr = -1
    self.scr_objs = [
        [  # km/h screen
            # dynamic objects have a call back (cb) lambda
            Label(wri_big, 16, 0, '22.2', right=True,
                  cb=lambda: '{:0.1f}'.format(self.vg.getSpeed())),
            # static objects have None
            Label(wri_small, 37, 90, 'km/h')
        ],
        [  # voltage screen
            Label(wri_big, 16, 0, '2222', right=True,
                  cb=lambda: str(self.vg.ina.getV())),
            Label(wri_small, 37, 100, 'mV')
        ],
        [  # current screen
            Label(wri_big, 16, 0, '-222', right=True,
                  cb=lambda: '{:4d}'.format(self.vg.ina.getI())),
            Label(wri_small, 37, 100, 'mA')
        ],
        [  # power screen
            Label(wri_big, 16, 0, '-2.22', right=True,
                  cb=lambda: '{:0.2f}'.format(self.vg.ina.getV() * self.vg.ina.getI() / 1000000)),
            Label(wri_small, 37, 110, 'W')
        ],
        [  # Hotspot on off screen
            Label(wri_fa, 22, 8, '', cb=lambda: '' if self.vg.isAP else ''),
            Label(wri_small, 22, 65, 'AP mode'),
            Label(wri_small, 38, 65, 'velogen')
        ]
    ]
    refresh(self.vg.oled, True)

import time
from tkinter import *
import tkinter.filedialog as fd
from tkinter import messagebox
import os
import sys
import argparse
from pathlib import Path
from random import uniform

from writer import Writer

FILE_NAME = 'text.txt'
DEFAULT_INTERVAL = 0
DEFAULT_TIMEOUT = 5

writer = Writer(DEFAULT_INTERVAL)


def createParser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()
    parser.add_argument('--source', '-s', type=Path,
                        help='Path to the file with the text to retype.')
    parser.add_argument('--clipboard', '-clip', type=bool,
                        help='Whether to retype from the clipboard.')
    parser.add_argument('--interval_start', '-int_s', type=float,
                        help='Start of the typing interval between characters.',
                        default=DEFAULT_INTERVAL)
    parser.add_argument('--interval_end', '-int_e', type=float,
                        help='End of the typing interval between characters.',
                        default=DEFAULT_INTERVAL)
    parser.add_argument('--timeout', '-t', type=float,
                        help='Delay before typing starts.',
                        default=DEFAULT_TIMEOUT)
    return parser


def choose_file():
    global FILE_NAME

def tabs(use_spi=False, soft=True):
    ssd = setup(use_spi, soft)  # Create a display instance
    Writer.set_textpos(ssd, 0, 0)  # In case previous tests have altered it
    wri = Writer(ssd, fixed, verbose=False)
    wri.set_clip(False, False, False)  # Char wrap
    wri.printstring('1\t2\n')
    wri.printstring('111\t22\n')
    wri.printstring('1111\t1')
    ssd.show()

def __init__(self, i2c):
    self.ssd1306 = SSD1306_I2C(128, 64, i2c)
    self.writer = Writer(self.ssd1306, freesans20, verbose=False)
    self.writer.set_clip(True, True)

def fonts(use_spi=False, soft=True):
    ssd = setup(use_spi, soft)  # Create a display instance
    Writer.set_textpos(ssd, 0, 0)  # In case previous tests have altered it
    wri = Writer(ssd, freesans20, verbose=False)
    wri.set_clip(False, False, False)  # Char wrap
    wri_f = Writer(ssd, small, verbose=False)
    wri_f.set_clip(False, False, False)  # Char wrap
    wri_f.printstring('Sunday\n')
    wri.printstring('12 Aug 2018\n')
    wri.printstring('10.30am')
    ssd.show()

class PushdownAutomaton(Automaton):
    """
    A deterministic pushdown automaton with epsilon transitions
    behavior simulator.
    """

    def __init__(self, states, symbols, stacksymbols, acceptableStates,
                 q0, z0, transitions):
        """
        Stores the automaton definition.

        states: states the automaton can be in
        symbols: input symbols that can be fed to the automaton
        stacksymbols: symbols that can be stored on the automaton's stack
        acceptableStates: states in which the automaton should be when the
            simulation ends if the automaton accepts the char sequence
            fed to it
        q0: start state
        z0: start stack symbol
        transitions: list of transitions the automaton can make
        """
        super(PushdownAutomaton, self).__init__(states, symbols,
                                                acceptableStates, q0,
                                                transitions)
        self.stacksymbols = stacksymbols
        self.z0 = z0
        self.stack = []
        self.simulationLog = ""
        self.writer = Writer(self)

    @staticmethod
    def parseAutomatonDefinition(definition):
        """
        Parses the automaton definition (if the definition is correctly
        given) and returns a class instance.
        """
        if len(definition) < 8:
            return None
        pushdownAutomaton = \
            PushdownAutomaton(definition[1], definition[2], definition[3],
                              definition[4], definition[5], definition[6],
                              definition[7:])
        return pushdownAutomaton

    def simulate(self, input):
        """
        Simulates the behavior of the automaton for a given input.
        """
        # print and log the information about the start of simulation
        self.writer.writeSimulationIntro()
        # initialize the automaton according to the definition
        currentState = self.q0
        self.stack = []
        self.stack.extend(["$", self.z0])
        # print and log the state of the automaton at the start of simulation
        self.writer.writeCurrentState(currentState)
        currentState = self.makeEpsilonTransition(currentState)
        # simulate behavior for every character in the given input
        for index, character in enumerate(input):
            currentState = self.makeTransition(currentState, character)
            if currentState is None:
                # end the simulation with an unsuccessful annotation
                self.writer.writeSimulationEnd(currentState, False)
                return self.simulationLog
            # don't make epsilon transitions if the automaton is in an
            # acceptable state and has digested all the characters in the
            # given sequence
            if index + 1 < len(input) or currentState not in self.acceptableStates:
                currentState = self.makeEpsilonTransition(currentState)
        # if the automaton digests all characters without stopping, end the
        # simulation with a successful annotation
        self.writer.writeSimulationEnd(currentState, True)
        return self.simulationLog

    def makeTransition(self, currentState, character, isEpsilon=False):
        """
        Makes a transition given a current state and an input character.
        Uses different behavior if the transition is an epsilon transition.
        Returns the next state of the automaton (None if the transition
        can't be made).
        """
        nextState = None
        addToStack = None  # characters to be added to the stack
        poppedCharacter = "$"
        # get the character that is currently on top of the stack
        try:
            poppedCharacter = self.stack.pop()
        except Exception as e:
            print("EXCEPTION:", currentState, character, isEpsilon)
        # concatenate the parameters to get a key in the transitions dictionary
        currentConfiguration = currentState + "," + character + "," + poppedCharacter
        # if the key is contained in the dictionary
        if currentConfiguration in self.transitions:
            # process the transition
            nextState = self.transitions.get(currentConfiguration)[0]
            addToStack = list(self.transitions.get(currentConfiguration)[1])
            # add the characters specified by the transition to the top of stack
            for i in range(len(addToStack) - 1, -1, -1):
                if addToStack[i] == "$":
                    break
                self.stack.append(addToStack[i])
            # print and log the current state of the automaton
            self.writer.writeCurrentState(nextState, currentState, character)
        # if the transition is not possible and the tried transition was an
        # epsilon transition, return the popped character to the stack
        elif isEpsilon:
            self.stack.append(poppedCharacter)
        return nextState

    def makeEpsilonTransition(self, currentState):
        """
        Makes epsilon transitions from a given current state until an
        epsilon transition can't be made or the state transitioned to is
        an acceptable state of the automaton.
        """
        nextState = self.makeTransition(currentState, "$", True)
        # if the epsilon transition cannot be made
        if nextState is None:
            # return the last state the automaton was in before the transition
            return currentState
        # return the current state if it is an acceptable state
        if nextState in self.acceptableStates:
            return nextState
        # otherwise try to make a new epsilon transition recursively
        return self.makeEpsilonTransition(nextState)

    def getStackString(self):
        """
        Returns a string representation of the stack.
        """
        return "".join(self.stack[:0:-1])

    def __str__(self):
        return ('Automaton definition (pushdown automaton):\n'
                'States: {automaton.states}\n'
                'Symbols: {automaton.symbols}\n'
                'Stack symbols: {automaton.stacksymbols}\n'
                'Acceptable states: {automaton.acceptableStates}\n'
                'Starting state: {automaton.q0}\n'
                'Starting stack: {automaton.z0}\n')\
                .format(automaton=self)

def rjust(use_spi=False, soft=True):
    ssd = setup(use_spi, soft)  # Create a display instance
    Writer.set_textpos(ssd, 0, 0)  # Previous tests may have altered it
    wri = Writer(ssd, freesans20, verbose=False)
    wri.set_clip(False, False, False)  # Char wrap
    my_str = 'Sunday\n'
    l = wri.stringlen(my_str)
    Writer.set_textpos(ssd, col=WIDTH - l)
    wri.printstring(my_str)
    my_str = '12 Aug 2018\n'
    l = wri.stringlen(my_str)
    Writer.set_textpos(ssd, col=WIDTH - l)
    wri.printstring(my_str)
    my_str = '10.30am'
    l = wri.stringlen(my_str)
    Writer.set_textpos(ssd, col=WIDTH - l)
    wri.printstring(my_str)
    ssd.show()

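# The three right-justified strings in rjust() above repeat one pattern; this
# sketch factors it into a helper, assuming only the Writer API already used
# there (stringlen() and the set_textpos() classmethod).
def print_rjust(wri, ssd, s):
    # Measure the rendered width, then start the column so the text ends at WIDTH.
    Writer.set_textpos(ssd, col=WIDTH - wri.stringlen(s))
    wri.printstring(s)

# Equivalent to the body of rjust():
# for s in ('Sunday\n', '12 Aug 2018\n', '10.30am'):
#     print_rjust(wri, ssd, s)
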
class EpsilonNFA(Automaton):
    """
    A non-deterministic finite automaton with epsilon transitions
    behavior simulator.
    """

    def __init__(self, states, symbols, acceptableStates, q0, transitions):
        super(EpsilonNFA, self).__init__(states, symbols, acceptableStates,
                                         q0, transitions)
        self.simulationLog = ""
        self.writer = Writer(self)

    @staticmethod
    def parseAutomatonDefinition(definition):
        """
        Parses the automaton definition (if the definition is correctly
        given) and returns a class instance.
        """
        if len(definition) < 6:
            return None
        epsilonNFA = \
            EpsilonNFA(definition[1], definition[2], definition[3],
                       definition[4], definition[5:])
        return epsilonNFA

    def simulate(self, input):
        """
        Simulates the behavior of the automaton for a given character
        sequence (input).
        """
        # initialize simulation with the starting state and an epsilon transition
        self.writer.writeSimulationIntro()
        currentStates = {self.q0}
        currentStates = currentStates.union(
            self.makeEpsilonTransition(currentStates))
        self.writer.writeCurrentStates(currentStates)
        for character in input:
            nextStates = self.makeTransition(currentStates, character)
            nextStates = nextStates.union(
                self.makeEpsilonTransition(nextStates))
            currentStates.clear()
            currentStates = currentStates.union(nextStates)
            # if the transitions did not result in any new states, add a hashtag
            if not currentStates:
                currentStates.add('#')
            self.writer.writeCurrentStates(currentStates)
        self.writer.writeSimulationEnd(currentStates, success=True)
        return self.simulationLog

    def makeTransition(self, currentStates, character, isEpsilon=False):
        """
        Makes a transition given current states and an input character.
        Uses different behavior if the transition is an epsilon transition.
        Returns the next states of the automaton (None if the transition
        can't be made).
        """
        nextStates = set()
        for state in currentStates:
            currentConfiguration = state + "," + character
            if currentConfiguration in self.transitions:
                newStates = self.transitions.get(currentConfiguration)
                nextStates = nextStates.union(newStates)
                self.writer.writeTransition(state, character, newStates, isEpsilon)
        nextStates.discard('#')  # hashtags will be added later if needed
        return nextStates

    def makeEpsilonTransition(self, currentStates):
        """
        Makes a transition with the epsilon transition character recursively
        until the transition cannot be made anymore. The original call of
        the function returns all new states gathered in the recursive
        search described above.
        """
        nextStates = self.makeTransition(currentStates, '$', True)
        # if the epsilon transition did not occur or it started an infinite loop
        if not nextStates or nextStates == currentStates:
            return currentStates  # end recursion
        else:
            return nextStates.union(self.makeEpsilonTransition(nextStates))

    def __str__(self):
        return ('Automaton definition '
                '(non-deterministic finite automaton with epsilon transitions):\n'
                'States: {automaton.states}\n'
                'Symbols: {automaton.symbols}\n'
                'Acceptable states: {automaton.acceptableStates}\n'
                'Starting state: {automaton.q0}\n')\
                .format(automaton=self)

def crawling(self, category_name):
    # Multi Process PID
    print(category_name + " PID: " + str(os.getpid()))

    writer = Writer(category_name=category_name, date=self.date)
    # article URL format
    url = "http://news.naver.com/main/list.nhn?mode=LSD&mid=sec&sid1=" + str(
        self.categories.get(category_name)) + "&date="
    # collect articles from start_year/start_month/start_day through
    # end_year/end_month/end_day
    day_urls = self.make_news_page_url(url, self.date['start_year'],
                                       self.date['end_year'],
                                       self.date['start_month'],
                                       self.date['end_month'],
                                       self.date['start_day'],
                                       self.date['end_day'])
    print(category_name + " Urls are generated")
    print("The crawler starts")

    for URL in day_urls:
        regex = re.compile(r"date=(\d+)")
        news_date = regex.findall(URL)[0]

        request = self.get_url_data(URL)
        document = BeautifulSoup(request.content, 'html.parser')

        # html - newsflash_body - type06_headline, type06
        # collect the articles on each page
        post_temp = document.select('.newsflash_body .type06_headline li dl')
        post_temp.extend(document.select('.newsflash_body .type06 li dl'))

        # store the URL of each article on the page
        post = []
        for line in post_temp:
            post.append(line.a.get('href'))  # push every article URL on this page into post
        del post_temp

        for content_url in post:  # article URL
            # crawling delay
            sleep(0.01)

            # fetch the article HTML
            request_content = self.get_url_data(content_url)
            try:
                document_content = BeautifulSoup(request_content.content, 'html.parser')
            except:
                continue

            try:
                # get the article headline
                tag_headline = document_content.find_all('h3', {'id': 'articleTitle'}, {'class': 'tts_head'})
                text_headline = ''  # initialize the headline
                text_headline = text_headline + ArticleParser.clear_headline(
                    str(tag_headline[0].find_all(text=True)))
                if not text_headline:  # skip the article if the headline is empty
                    continue

                # get the article body
                tag_content = document_content.find_all('div', {'id': 'articleBodyContents'})
                text_sentence = ''  # initialize the body text
                text_sentence = text_sentence + ArticleParser.clear_content(
                    str(tag_content[0].find_all(text=True)))
                if not text_sentence:  # skip the article if the body is empty
                    continue

                # # get the article's press company
                # tag_company = document_content.find_all('meta', {'property': 'me2:category1'})
                # text_company = ''  # initialize the company name
                # text_company = text_company + str(tag_company[0].get('content'))
                # if not text_company:  # skip the article if the company is empty
                #     continue
                #
                # # write the CSV row
                # wcsv = writer.get_writer_csv()
                # wcsv.writerow([news_date, category_name, text_company, text_headline, text_sentence, content_url])
                #
                # del text_company, text_sentence, text_headline
                # del tag_company
                # del tag_content, tag_headline
                # del request_content, document_content

                # write the CSV row
                wcsv = writer.get_writer_csv()
                wcsv.writerow([text_sentence])

                del text_headline
                del tag_headline
                del request_content, document_content

            except Exception as ex:  # UnicodeEncodeError ..
                # wcsv.writerow([ex, content_url])
                del request_content, document_content
                pass
    writer.close()

from ssd1306 import SSD1306_SPI

WIDTH = const(128)
HEIGHT = const(64)

pdc = machine.Pin(27, machine.Pin.OUT)
pcs = machine.Pin(26, machine.Pin.OUT)
sck_pin = machine.Pin(19, machine.Pin.OUT)
mosi_pin = machine.Pin(23, machine.Pin.OUT)
miso_pin = machine.Pin(25, machine.Pin.IN)
prst = machine.Pin(18, machine.Pin.OUT)
spi = machine.SPI(1, baudrate=1000000, sck=sck_pin, mosi=mosi_pin, miso=miso_pin)
ssd = SSD1306_SPI(WIDTH, HEIGHT, spi, pdc, prst, pcs)

import freesans20
from writer import Writer

wri2 = Writer(ssd, freesans20, verbose=True)
Writer.set_clip(True, True)
Writer.set_textpos(0, 0)
wri2.printstring('MicroPython\n')
ssd.show()

def combo_write():
    reader = Reader()
    writer = Writer(reader)
    writer.write_unshown()

def test():
    pcs = machine.Pin('Y5', machine.Pin.OUT_PP, value=0)  # Active high
    spi = machine.SPI(2)
    ssd = SHARP(spi, pcs)
    rhs = ssd.width - 1
    ssd.line(rhs - 80, 0, rhs, 80, 1)
    square_side = 40
    ssd.fill_rect(rhs - square_side, 0, square_side, square_side, 1)
    wri = Writer(ssd, freesans20)  # verbose = False to suppress console output
    Writer.set_textpos(ssd, 0, 0)
    wri.printstring('Sunday\n')
    wri.printstring('12 Aug 2018\n')
    wri.printstring('10.30am')
    wri = Writer(ssd, arial_50)
    Writer.set_textpos(ssd, 0, 120)
    wri.printstring('10:30')
    ssd.show()

def handle_command(command, details, channel, respond=True):
    """
    Receives commands directed at the bot and determines if they
    are valid commands. If so, then acts on the commands. If not,
    returns back what it needs for clarification.
    """
    response = False
    if command == "learn":
        learner = Learner()
        response = learner.learn(details[0], " ".join(details[1:]))
    elif command == "unlearn":
        learner = Learner()
        content = None
        if len(details) > 1:
            content = " ".join(details[1:])
        response = learner.unlearn(details[0], content)
    elif command == "commands":
        learner = Learner()
        response = learner.list_commands()
    elif command == "list":
        learner = Learner()
        response = learner.list(details[0])
    elif command == "cowsay":
        out = subprocess.check_output(['cowsay', " ".join(details)])
        response = "```" + out + "```"
    elif command == "meme":
        memer = Memer()
        if not details or len(details) == 0:
            response = memer.list_templates()
        else:
            template = details.pop(0).strip()
            parts = [x.strip() for x in " ".join(details).split(",")]
            top = parts[0] if len(parts) > 0 else None
            bottom = parts[1] if len(parts) > 1 else None
            response = memer.get_meme(template, top, bottom)
    elif command == "hostname":
        response = "slurms coming to you live from: `%s (%s)`" % (
            subprocess.check_output("hostname -A", shell=True).strip(),
            subprocess.check_output("hostname -i", shell=True).strip())
    elif command == "write":
        writer = Writer()
        response = writer.get_writing(" ".join(details))
    elif command == "imglearn":
        learner = Learner()
        imgur = Imgur()
        image_url = imgur.save_from_url(" ".join(details[1:]))
        response = learner.learn(details[0], image_url)
    elif command == "++" or command == "endorse":
        plusser = Plusser()
        reason = ""
        if len(details) > 1:
            reason = " ".join(details[1:])
        response = plusser.plus(details[0], reason)
    elif command == "plusses":
        plusser = Plusser()
        response = plusser.get(details[0])
    elif command == "leaders" or command == "leader_board":
        plusser = Plusser()
        response = plusser.leader_board()
    elif command == "monthly_leaders" or command == "monthly_leader_board":
        plusser = Plusser()
        months_ago = 0
        if details and len(details) > 0:
            months_ago = details[0]
        response = plusser.monthly_leader_board(months_ago)
    elif command == "youtube":
        query = " ".join(details)
        videos = youtube.youtube_search(query)
        if len(videos) > 0:
            response = videos[-1]
        else:
            response = "sorry, couldnt find any videos for %s" % query
    elif command == "echo":
        response = " ".join(details)
    elif command == "pipe":
        pipe(command, details, channel)
    elif command == "doom":
        doom = Doom()
        response = doom.doom(details)
    else:
        # see if a randomly entered command is something that was
        # previously learned
        learner = Learner()
        response = learner.get(command)

    if response and respond:
        slack_client.api_call("chat.postMessage", channel=channel,
                              text=response, as_user=True)
    elif not respond:
        return response

UNIX_TS_CONVERT = 946684800  # necessary to convert 'epoch since 01-01-2000' to 'epoch since 01-01-1970'

SCL_PIN = 4    # D2
SDA_PIN = 0    # D3
DHT_PIN = 12   # D6
LED1_PIN = 2   # D4
# LED2_PIN = 16  # D0

LED1 = machine.Pin(LED1_PIN, machine.Pin.OUT)
# LED2 = machine.Pin(LED2_PIN, machine.Pin.OUT)
RTC = machine.RTC()
DHT = dht.DHT22(machine.Pin(DHT_PIN))
WLAN = network.WLAN(network.STA_IF)
WLAN_AP = network.WLAN(network.AP_IF)
Token = ""
I2C = machine.I2C(scl=machine.Pin(SCL_PIN), sda=machine.Pin(SDA_PIN))
OLED = ssd1306.SSD1306_I2C(128, 64, I2C, 0x3c)
WRI = Writer(OLED, font_sensor_35)
WRI_BT = Writer(OLED, font_sensor_20)

with open('env.json') as fp:
    SECRETS = ujson.loads(fp.read())

SENSOR_ID = SECRETS['sensor']['name'].upper()
client = MQTTClient(client_id=SENSOR_ID,
                    server=SECRETS['mqtt']['host'],
                    port=SECRETS['mqtt']['port'],
                    user=SECRETS['mqtt']['user'],
                    password=SECRETS['mqtt']['pass'])
WLAN_AP.active(False)


def main():
    temp = float('NaN')

def update_main_area(msg):
    Writer.set_textpos(OLED, row=0, col=0)
    WRI.printstring(msg)
    OLED.show()

)

# Create the plotter, this object will render all plots to the notebook.
plotter = Plotter(
    datasets=datasets,
    z_samples=z_samples,
    test_s=0.9,
    train_s=0.2,
    zline_s=5,
    zline_skip=datasets.x_test.shape[0] // 50,
)

# Create the writer, this object will write information that can be browsed
# using tensorboard.
writer = Writer(
    datasets_target_function=datasets.target_function_desc,
    trainer_params=trainer.params_desc,
    datasets_params=datasets.params_desc,
)

# Create the tester, this object will run goal 1, goal 2 and emd tests.
tester = Tester(
    experiment=experiment,
    z_samples=z_samples,
    datasets=datasets,
    plotter=plotter,
    writer=writer,
    model=model,
    device=device,
)

# %%