def one(rule):
    os.chdir('sdks/%s' % rule.DIRECTORY)
    if os.popen('git status -s .').read():
        print 'directory is not clean'
        os.system('git status')
        return
    os.system('git clean -f -d .')

    # icon path
    os.environ['ICON_DIRECTORY'] = rule.ICON_PATH
    if not os.path.isabs(rule.ICON_PATH):
        os.environ['ICON_DIRECTORY'] = os.path.abspath(rule.ICON_PATH)

    process(rule.rules())

    # preview replaces
    #os.system('git diff -p --raw .')

    os.system('ant clean linkassets release')

    # TODO copy package
    d = '$HOME/android_package/%s/%s' % (rule.APPLABEL, rule.VERSION_CODE)
    os.system('mkdir -p %s' % d)
    output = '%s/%s_%s.apk' % (d, rule.CH_NAME, rule.VERSION_CODE)
    os.system('cp bin/poem-release.apk %s' % output)

    os.system('git clean -f -d .')
    os.system('git checkout -- .')
    os.chdir('../..')
def askme(command):
    conn = sqlite3.connect("assistant.db")
    c = conn.cursor()
    try:
        a = c.execute(
            f"SELECT reply from command_reply WHERE command like '{command}'")
        b = a.fetchall()
        d = []
        for i in str(b):
            d.append(str(i))
        for i in range(0, 4):
            d.pop()
        e = ""
        for i in range(3, len(d)):
            e = e + str(d[i])
        process(e)
    except IndexError:
        speak("sorry sir, i couldn't understand that")
        speak("do you want me to save this?")
        ans = takecommand()
        if ans == "yes":
            speak("ok, sir! please tell me the reply of this command")
            reply = takecommand()
            c.execute(
                f"insert into command_reply values('{command}','{reply}')")
            speak("saved it sir")
        elif ans == "no":
            speak("ok, sir")
    conn.commit()
    c.close()
    conn.close()
def parse(url):
    api_path = 'http://yle.fi/ylex/api/article/'
    _id = url.split('/')[-1]
    r = requests.get(api_path + _id)

    if r.status_code == 404:
        return processor.create_dictionary('', url, r.status_code, [u''], [u''],
                                           u'', u'', u'', u'', [u''], [u''])

    r.encoding = 'UTF-8'
    json = r.json()

    categories = [processor.process(json['homesection']['name'])]
    datetime_list = processor.collect_datetime_json(json, 'datePublished', 'dateModified')
    author = processor.process(json['authors'][0]['name'])
    title = processor.process(json['title'])
    ingress = processor.process(json['lead'])

    text_html = BeautifulSoup(json['html'], "html.parser")
    text = processor.collect_text(text_html)

    if 'image' in json:
        image_json = json['image']
        images = [image_json['uri']]
        captions = [image_json['alt']]
    else:
        images, captions = [u''], [u'']

    return processor.create_dictionary('Yle X', url, r.status_code, categories,
                                       datetime_list, author, title, ingress,
                                       text, images, captions)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str,
                        help='input interface to listen incoming flows', required=True)
    parser.add_argument('-d', '--database', type=str,
                        help='database interface to pull map and push stats', default=None)
    parser.add_argument('-t', '--hours', type=int,
                        help='DB polling period in hours (default: %(default)s)', default=24)
    parser.add_argument('-s', '--sources', type=int,
                        help='source checking period in minutes (default: %(default)s)', default=1)
    parser.add_argument('-n', '--num', type=int,
                        help='number of source checking periods (-s) to declare inactivity (default: %(default)s)',
                        default=5)
    parser.add_argument('-w', '--web', type=int,
                        help='port number to open web interface on', default=None)
    args = parser.parse_args()

    if args.database:
        hostname, port = processor.parseaddr(args.database, 'mysql', 'DB connection')
    else:
        hostname = None
        port = None

    if args.num <= 0:
        print "Number of checking periods must be positive. Got %d." % (args.num)
        return -1

    try:
        processor.process(args.input, hostname, port, args.hours, args.sources,
                          args.num, args.web)
    except Exception, e:
        traceback.print_exc()
        print "Error: %s" % (str(e))
def parse(url):
    api_path = 'https://www.kauppalehti.fi/api/news/article/'
    _id = url.split('/')[-1]
    r = requests.get(api_path + _id)

    if r.status_code == 404:
        return processor.create_dictionary('', url, r.status_code, [u''], [u''],
                                           u'', u'', u'', u'', [u''], [u''])

    r.encoding = 'UTF-8'
    json = r.json()

    categories = [processor.process(json['mainCategory']['name'])]
    datetime_list = processor.collect_datetime_json(json, 'published', 'modified')
    author = processor.process(json['byline'][0])
    title = processor.process(json['title'])
    ingress = processor.process(json['headline'])

    text_html = BeautifulSoup(json['body'], "html.parser")
    text = processor.collect_text(text_html)

    if 'keyImage' in json:
        image_url = 'http://images.kauppalehti.fi/547x/http:' + json['keyImage']
        images = [image_url]
    else:
        images = [u'']

    return processor.create_dictionary('Kauppalehti', url, r.status_code, categories,
                                       datetime_list, author, title, ingress,
                                       text, images, [u''])
def argparse_exec(args):
    try:
        with os.fdopen(3, 'r') as f:
            mapper = f.read()
    except OSError as e:
        print >> sys.stderr, 'Try 3<map.py'
        return sys.exit(1)
    processor = ProcessorMapper(mapper, args.kinds)
    processor.process()
def set(self, instance, value, **kwargs):
    """Mutator.

    ``value`` is a list of UIDs or one UID string to which I will add a
    relation to. None and [] are equal.
    """
    rc = getToolByName(instance, atconfig.REFERENCE_CATALOG)
    relationslib = getToolByName(instance, RELATIONS_LIBRARY)
    targetUIDs = [
        ref.targetUID for ref in rc.getReferences(instance, self.relationship)
    ]

    if (not self.multiValued and value
            and type(value) not in (ListType, TupleType)):
        value = (value, )

    if not value:
        value = ()

    # convert objects to uids if necessary
    uids = []
    for v in value:
        if type(v) in StringTypes:
            uids.append(v)
        else:
            uids.append(v.UID())

    add = [v for v in uids if v and v not in targetUIDs]
    sub = [t for t in targetUIDs if t not in uids]

    # prepare triples
    sUid = instance.UID()
    addtriples = []
    subtriples = []

    # build connect triples
    for tUid in add:
        addtriples.append((sUid, tUid, self.relationship))

    # build disconnect triples
    for tUid in sub:
        subtriples.append((sUid, tUid, self.relationship))

    # do the job
    processor.process(relationslib, connect=addtriples, disconnect=subtriples)

    if self.callStorageOnSet:
        # if this option is set, the reference field's values get written
        # to the storage even if the reference field never uses the storage,
        # e.g. if I want to store the reference UIDs into an SQL field
        ObjectField.set(self, instance, self.getRaw(instance), **kwargs)
def argparse_exec(args):
    op = upsert
    if args.operation == 'remove':
        op = remove
    processor = BatchProcessor(args.dataset, args.kinds, args.namespace, op,
                               block_size=args.block or 500,
                               parallel=args.parallel or 10)
    processor.process()
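# A minimal sketch of the argparse wiring that argparse_exec() above appears to
# expect. The original only shows the handler, so the flag names and defaults
# here are assumptions derived from the attributes it reads (dataset, kinds,
# namespace, operation, block, parallel).
import argparse


def build_parser():
    parser = argparse.ArgumentParser(description='Batch upsert/remove entities')
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--kinds', nargs='+', required=True)
    parser.add_argument('--namespace', default=None)
    parser.add_argument('--operation', choices=['upsert', 'remove'], default='upsert')
    parser.add_argument('--block', type=int, default=None,
                        help='entities per batch (argparse_exec falls back to 500)')
    parser.add_argument('--parallel', type=int, default=None,
                        help='concurrent batches (argparse_exec falls back to 10)')
    parser.set_defaults(func=argparse_exec)
    return parser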
def process(self):
    criteria = {}
    try:
        criteria['PAX'] = criteria['PAY'] = float(self.crit_A.get())
        criteria['PBX'] = criteria['PBY'] = float(self.crit_B.get())
        criteria['PXX'] = criteria['PXY'] = float(self.crit_x.get())
    except:
        messagebox.showerror('Error', 'Invalid Criteria!')
        return
    processor.process(self.file_path, criteria, self.test_name.get())
    self.go_button['state'] = 'disabled'
    self.pathvar.set('No file selected...')
def collect():
    context = zmq.Context()

    # Socket to receive messages on (collect results from worker)
    receiver = context.socket(zmq.PULL)
    receiver.bind(democfg.routing_table["sender"])

    # Socket for worker control
    controller = context.socket(zmq.PUB)
    controller.bind(democfg.routing_table["controller"])

    # Wait for start signal
    assert receiver.recv() == democfg.start_flag

    processor.process(receiver)

    # Let workers know that all results have been processed
    controller.send(democfg.done_msg)

    # Finished, but give 0MQ time to deliver
    time.sleep(5 * democfg.pause_time)
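# A minimal sketch of a worker that could feed collect() above. The actual
# worker is not shown in the original, so everything here is an assumption:
# it presumes democfg.routing_table holds addresses usable for connect() as
# well as bind(), and do_work is a placeholder returning bytes.
import zmq


def worker(do_work):
    context = zmq.Context()

    # Push results to the collector's PULL socket
    sender = context.socket(zmq.PUSH)
    sender.connect(democfg.routing_table["sender"])

    # Subscribe to the collector's control channel to learn when to stop
    control = context.socket(zmq.SUB)
    control.connect(democfg.routing_table["controller"])
    control.setsockopt(zmq.SUBSCRIBE, b"")

    poller = zmq.Poller()
    poller.register(control, zmq.POLLIN)

    while True:
        sender.send(do_work())
        # Stop as soon as the collector publishes its done message
        if poller.poll(0) and control.recv() == democfg.done_msg:
            break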
def main():
    global message
    content = ""
    f = open("../resource/template.html")
    line = f.readline()
    while line:
        content = content + line
        line = f.readline()
    f.close()

    options = read_args()
    zc = ZkClient(options.zserver, options.zport)
    my_topology_list = zc.get_children("/")
    for single_topo in my_topology_list:
        api = crawlUrl + "/api/v1/topology/" + single_topo + "?sys=false"
        read = urllib.urlopen(api).read()
        storm_ui_data = json.loads(read)
        if not re.search("walle", single_topo):
            continue
        consumer_partition = "/" + single_topo + "/partition_0"
        print "consumer_partition" + consumer_partition
        tuple = zc.get_node(consumer_partition)[0]
        j = json.loads(tuple)
        toponame = j['topology']['name']
        try:
            zk_data = process(zc.spouts(consumer_partition, toponame), storm_ui_data)
        except ZkError, e:
            print 'Failed to access Zookeeper: %s' % str(e)
            return 1
        except ProcessorError, e:
            print 'Failed to process: %s' % str(e)
            return 1
def curses_main(window, args):
    zc = args[0]
    options = args[1]

    aggregator = SummaryAggregator(options.topology,
                                   options.zserver + ':' + str(options.zport))

    while True:
        last_update = datetime.datetime.utcnow()

        spouts = zc.spouts(options.spoutroot, options.topology)
        summary = process(spouts)
        aggregator.add_summary(summary, datetime.datetime.utcnow())

        header_lines = aggregator.get_header_lines()
        partition_data_lines = aggregator.get_partition_data_lines()

        if window is not None:
            window.erase()
            curses_display(window, header_lines, 0, 0)
            curses_display(window, partition_data_lines, 0, len(header_lines) + 1)
            window.refresh()
        else:
            print '\r\n'.join(header_lines)
            print
            print '\r\n'.join(partition_data_lines)

        delta = get_delta(last_update)
        sleep_time = options.update_interval - delta
        if sleep_time > 0.0:
            time.sleep(sleep_time)
def parse_from_archive(url, content):
    article = BeautifulSoup(content, "html.parser")

    if article == None:
        return processor.create_dictionary('Lapin kansa', url, 404, [u''], [u''],
                                           u'', u'', u'', u'', [u''], [u''])

    meta = article.find(class_='hakutuloslahde')

    datetime_list = processor.collect_datetime(meta)

    categories = [processor.collect_text(meta).split(',')[1].strip()]
    author = processor.collect_text(article.find(class_='signeeraus'))
    title = processor.collect_text(article.find(class_='otsikko'))

    ingress = processor.collect_text(article.find_all(class_='jalkirivi')[1])
    ingress += ' ' + processor.collect_text(article.find(class_='esirivi'))
    ingress = ingress.strip()

    text_divs = article.find_all(class_='artikkelip')
    text = ''
    for text_content in text_divs:
        text += processor.collect_text(text_content) + ' '
    text = processor.process(text.strip())

    text += processor.collect_text(article.find(class_='korjaus'))

    captions = processor.collect_image_captions(article.find_all(class_='kuva'))

    return processor.create_dictionary('Lapin kansa', url, 200, categories,
                                       datetime_list, author, title, ingress,
                                       text, [u''], captions)
def run(): print("start") listener = Recog() tagger = Tagger() print("done setting up") while True: try: sentence = listener.listen() # sentence = "make line graph using range from A1 to E4" tags = tagger.match_rules(sentence) print(tags) process(tags) except KeyboardInterrupt: break except Exception as e: continue
def test_assert_input__eq__output(session, state):
    input_data = {
        "contact": {
            "$ref": CONTACT_ID,
            "$comment": "My comment from the call form"
        },
        "transaction": {
            "fired": "2018-12-27T11:41:40.249Z",
        }
    }

    call_id, result = process(session, input_data)
    assert call_id
    assert result

    contact = session.get_entry(Contact.module, CONTACT_ID)
    call = session.get_entry(Call.module, call_id)

    for field in FIELDS:
        found = list(
            filter(lambda options: options.get('name') == field, result))
        assert found
        assert len(found) == 1
        assert found[0].get('value') == getattr(call, field)
def get_processed(a1=18, a2=24, p1=0, p2=8, l=10000,
                  g='top-1,top-10%25,top-15%25,theory'):
    # processor is imported in functions to avoid deadlock when running
    # test_process in processor.py since that imports this module.
    import processor

    if not os.path.exists('cached_data'):
        os.makedirs('cached_data')

    processed = {}
    a1 = int(a1)
    a2 = int(a2)
    p1 = int(p1)
    p2 = int(p2)
    l = int(l)
    g = urllib.unquote(g).decode('utf8')
    goals = g.split(',')
    for goal in goals:
        filename = "cached_data/a1%ia2%ip1%ip2%il%i-%s.json" % (a1, a2, p1, p2, l, goal)
        processed_goal = []
        if os.path.isfile(filename):
            with open(filename) as fhandler:
                processed_goal = ujson.load(fhandler)
        else:
            compatibilities = get_compatibilities(a1, a2, p1, p2, l)
            processed_goal = list(processor.process(compatibilities, lifetimes=l, goal=goal))
            with open(filename, 'w') as fhandler:
                ujson.dump(processed_goal, fhandler)
        processed[goal] = processed_goal
    return processed
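# Hypothetical usage of get_processed() above: the first call computes each goal
# via processor.process() and writes one JSON file per goal under cached_data/,
# while a repeated call with the same arguments is served from those cached files.
# The argument values below are only illustrative.
if __name__ == '__main__':
    results = get_processed(a1=18, a2=24, p1=0, p2=8, l=10000, g='top-1,theory')
    for goal in results:
        print('%s: %d entries' % (goal, len(results[goal])))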
def run(depth_arr, rgb_arr):
    # Read images and resize so they can be directly overlaid.
    depth = np.array(depth_arr, dtype=np.float64).reshape(TOF_SHAPE)
    scale_factor = 2
    depth = np.kron(depth, np.ones((scale_factor, scale_factor)))
    rgb = transform.resize(skio.imread(io.BytesIO(rgb_arr)), SHAPE)

    angle, left, right, est_depth, est_width = processor.process(depth, rgb)

    # Prepare display image
    bounds = np.zeros(SHAPE)
    bounds[:, left] = 1
    bounds[:, right] = 1
    bounds_rot = interpolation.rotate(bounds, -angle, reshape=False)
    rgb_disp = np.append(rgb, depth[:, :, np.newaxis], axis=2)
    rgb_disp[np.where(np.abs(bounds_rot) > 0.1)] = [0.0, 1.0, 0.0, 1.0]

    # img_rgbd_rotated = interpolation.rotate(rgbd, angle, reshape=False)
    # rgb_disp = np.copy(img_rgbd_rotated)
    # rgb_disp[:,left-1:left+1] = [1.0, 0.0, 0.0, 1.0]
    # rgb_disp[:,right-1:right+1] = [1.0, 0.0, 0.0, 1.0]
    # rgb_disp = interpolation.rotate(rgb_disp, -angle, reshape=False)

    rgb_disp = np.clip(rgb_disp, 0, 1)  # TODO: pytype

    # Java expects: byte[] displayImage, float estDepth, float estWidth
    return bytes(img_as_ubyte(rgb_disp)), est_depth, est_width
def process(self, in_file, work_dir, out_dir, out_filename):
    logger = self.get_logger()
    out_file = os.path.join(out_dir, out_filename)
    for info, progress in processor.process(in_file, work_dir, out_file):
        self.update_state(state='PROGRESS',
                          meta={'info': info, 'progress': progress})
    return out_filename
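# A minimal polling helper for the method above, assuming it is the body of a
# Celery bound task (the original shows only the method, so that is an
# assumption). `result` would be the AsyncResult returned by .delay() or
# .apply_async() on that task.
import time


def wait_for(result, poll_interval=1.0):
    # Report the PROGRESS metadata that process() publishes via update_state().
    while not result.ready():
        if result.state == 'PROGRESS' and result.info:
            print('%s (%s)' % (result.info.get('info'), result.info.get('progress')))
        time.sleep(poll_interval)
    return result.get()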
def main():
    options = read_args()
    zc = ZkClient(options.zserver, options.zport)

    try:
        zk_data = process(zc.spouts(options.spoutroot, options.topology))
    except ZkError, e:
        print 'Failed to access Zookeeper: %s' % str(e)
        return 1
def main(args):
    if args.sig is not None:
        T = curver.triangulation_from_sig(args.sig)
    else:
        T = curver.load(args.genus, max(args.punctures, 1)).triangulation

    P = Polyhedron.from_triangulation(T, args.upper, zeros=args.zeros)
    num_integral_points = P.integral_points_count(triangulation='cddlib')
    print(P)
    try:
        print('Polytope dimension: {}'.format(P.as_sage().dimension()))
    except AttributeError:
        print('Polytope dimension: Unknown')
    print('Drawing from [0, {})'.format(num_integral_points))

    common = dict(T=T, P=P, closed=args.punctures == 0)
    iterable = (dict(index=randrange(0, num_integral_points)) for _ in range(args.num))
    process(from_index, common, iterable, cores=args.cores, path=args.output)
def main():
    try:
        print_title()

        P = Params()
        P.load()
        P.check_all()

        files_to_rename = get_files_to_rename(P.INPUT_DIRS, P.VIDEO_EXTENSIONS)

        actions_to_process = preprocessor.preprocess(
            files=files_to_rename,
            language=P.LANGUAGE,
            output_dir=P.OUTPUT_DIR
        )

        processor.process(
            to_process=actions_to_process,
            config_path=P.get_path(expanded=True),
            ACTION=P.ACTION
        )
    except KeyboardInterrupt:
        print()
        log.info('Aborting.')
        exit(1)
    except ConnectionError:
        log.fail('Lost connection. Aborting.')
        exit(2)
    except ConnectionRefusedError:
        log.fail('Lost connection. Aborting.')
        exit(2)
    except KeyError as e:
        if e.args[0] == 'EDITOR':
            log.fail('Could not find the environment variable EDITOR. Aborting.')
            exit(1)
        else:
            log.fail('Uncaught KeyError exception: %s. Aborting.' % e.args[0])
            exit(2)
def run_main():
    global clusters
    mode = Mode.get()
    new_clusters, new_point = process(points, clusters, mode)
    clear_cluster()
    new_clusters = [new_clusters[i] for i in range(len(new_clusters))]
    clusters = deepcopy(new_clusters)
    draw_result(clusters, new_point)
    print("Mode:", mode)
    print("Clusters coords:", clusters)
def main(training_dir, test_file, output_file):
    print('Training...')
    training_data = train(training_dir)
    print('Processing...')
    final_data = process(training_data, test_file)
    print('Writing results...')
    output_result(final_data, output_file)
    print('Done.')
def main():
    options = read_args()
    zc = ZkClient(options.zserver, options.zport)

    try:
        display(process(zc.spouts(options.spoutroot, options.topology)),
                true_or_false_option(options.friendly))
    except ZkError, e:
        print 'Failed to access Zookeeper: %s' % str(e)
        return 1
def test_invalid_input2(session, state):
    input_data = {
        "contact": {
            "comment": "My comment from the call form"
        },
        "transaction": {
            "fired": "2018-12-27T11:41:40.249Z"
        }
    }

    call_id, result = process(session, input_data)
    assert not call_id
def run(path, mapping_path, idf_dict_path, db_password, do_db, do_mapping,
        clean_mapping, do_idf_dict, clean_idf_dict, drop):
    # Check if xml path exists
    if not os.path.exists(path):
        log_exit('Main', 'XML data directory path {} does not exist'.format(path))

    # Clean mapping
    if os.path.exists(mapping_path) and clean_mapping:
        os.remove(mapping_path)

    # Create mapping if it does not exist
    if (not os.path.exists(mapping_path)) and do_mapping:
        mapping = mapping_collector.get_mapping(path)
        mapping_collector.dump(mapping, mapping_path)

    # Clean idf dict
    if os.path.exists(idf_dict_path) and clean_idf_dict:
        os.remove(idf_dict_path)

    # Create idf dict if it does not exist
    if (not os.path.exists(idf_dict_path)) and do_idf_dict:
        idf_dict = idf_dict_collector.get_idf_dict(path)
        idf_dict_collector.dump(idf_dict, idf_dict_path)

    # Read mapping
    if not os.path.exists(mapping_path):
        log_exit('Main', 'Mapping file {} does not exist'.format(mapping_path))
    with open(mapping_path, 'r') as file:
        mapping = json.load(file)

    # Read idf dict
    if not os.path.exists(idf_dict_path):
        log_exit('Main', 'Idf dict file {} does not exist'.format(idf_dict_path))
    with open(idf_dict_path, 'r') as file:
        idf_dict = json.load(file)

    # Processing and dump into database
    if do_db:
        processor.process(mapping, idf_dict, path, db_password, drop)
def test_entire_flow(web3, prepared_contracts, creator, input_file):
    airdropper, omg_token = prepared_contracts
    airdrops = process(input_file.read())
    transactions = creator.create_txs(airdrops, BATCH_SIZE)

    # this being a long-running test, the unlocking from web3 fixture might have expired
    web3.personal.unlockAccount(web3.eth.accounts[0], "")

    signed = Signer(web3).sign_transactions(transactions)
    Sender(web3).send_transactions(signed, transactions)

    check_entirely_airdropped(airdrops, omg_token)
def on_post(self, req, resp):
    try:
        if req.content_length:
            body = json.load(req.stream)

        exc = processor.process(cfg.get('processing', lower=True), body)
        resp.body = exc
        resp.status = falcon.HTTP_200
    except Exception as exc:
        logging.error('Server Error: %s', exc.message)
        logging.debug('Server Error: %s', exc.message, exc_info=1)
        resp.body = {"Server Error": exc.message}
        resp.status = falcon.HTTP_500

    resp.body = json.dumps(resp.body)
def main(input_path, output_path, max_files, max_events, site_altitude,
         source_type, telescopes, location, chop, apply_quality_cuts):
    print("Checking inputs...")

    # Process click inputs as useable parameters
    input_path, output_path, types, site_location, choppoints, id_no = process_inputs(
        input_path, output_path, source_type, location, chop)

    print()
    print('--------------------------')
    print("Input path:", input_path)
    print("Output path:", output_path)
    print("Max files:", max_files)
    print("Max events:", max_events)
    print("Specific site altitude:", site_altitude)
    print("Source type:", source_type)
    print("Telescopes:", telescopes)
    print("Location:", location)
    print("Chop:", chop)
    print("Apply quality cuts:", apply_quality_cuts)
    print('--------------------------')
    print()

    # Check the validity of inputs
    input_validity = validate(input_path, output_path, max_files, max_events,
                              site_altitude, source_type, telescopes, location,
                              chop, types)
    if input_validity != "Valid":
        sys.exit(input_validity)

    print("Everything's good, let's get started...")

    # Process the simulations
    processor.process(input_path, output_path, max_files, max_events,
                      site_altitude, types, telescopes, site_location,
                      choppoints, id_no, apply_quality_cuts)

    print("Finished")
def main(args):
    input = args.input
    threshold = args.threshold

    with pdfplumber.open(input) as pdf:
        page_number = 1
        for page in pdf.pages:
            page_content = page.extract_text()
            if page_content == None:
                continue
            # print(page_content)
            word_scores = process(page_content)
            print('Page {0:3} -'.format(page_number),
                  get_difficult(word_scores, threshold))
            page_number += 1
def getpic(r, uid, context):
    'retrieves and passes all arguments to the image processor'
    username = list(sql.get_user(uid)[0])[1]
    songname = r['item']['name']
    albumname = r['item']['album']['name']
    totaltime = r['item']['duration_ms']
    crrnttime = r['progress_ms']
    coverart = requests.get(r['item']['album']['images'][1]['url'])
    artists = ', '.join([x['name'] for x in r['item']['artists']])
    try:
        pfp = context.bot.getUserProfilePhotos(uid, limit=1)['photos'][0][0]['file_id']
        user = requests.get(context.bot.getFile(pfp).file_path)
    except:
        user = requests.get('https://files.catbox.moe/jp6szj.jpg')
    return (processor.process(username, songname, albumname, artists,
                              crrnttime, totaltime, user, coverart))
def convert_worker(self, *args):
    # construct the saving file path
    save_path = (self.save_file_path + 'c' + self.orgName + ' ' +
                 self.year + '-' + self.month + '.csv')
    self.year = int(self.year)
    self.month = int(self.month)

    # call the process function to start converting
    self.stat_message, self.result_message = processor.process(
        self.orgName, self.year, self.month, self.file_names, save_path)

    # if everything runs well, you should be able to see the message box
    mBox._show('Result', 'This is the result \n: %s \n %s' %
               (self.stat_message, self.result_message))
def print_records(url):
    url = url.strip()
    resp = requests.get(url, stream=True)

    for record in ArchiveIterator(resp.raw):
        if record.rec_type == 'warcinfo':
            pass
        elif record.rec_type == 'response':
            # print(record.rec_headers)
            if not record.http_headers:
                continue
            if record.http_headers.get_header('Content-Type') == 'text/html':
                try:
                    soup = BeautifulSoup(
                        record.content_stream().read().decode("utf-8"))
                except Exception as e:
                    # print(e)
                    continue

                # Process record here, maybe spacy
                text = processor.process(soup)
                counts = word_count(text)
                top_3_words = [
                    x[0] for x in sorted(counts.items(),
                                         key=operator.itemgetter(1),
                                         reverse=True)[:2]
                ]
                node = record.rec_headers.get_header('WARC-Target-URI')
                outlinks = ",".join(
                    [link['href'] for link in soup.find_all('a', href=True)])
                msg = bytes(
                    ujson.dumps({
                        "Node": node,
                        "Keywords": ",".join(top_3_words),
                        "Outlinks": outlinks,
                        "Score": 1.0
                    }), "utf-8")
                socket.send(msg)
def handleF(self):
    # function needs to be changed for 404 to be returned properly
    os.chdir(START)
    path, params = self.get_parameters(self.path)
    if (not ".ico" in path):
        path = SOURCE + path
    if ((path == "" or "." not in path[-4:-1]) and os.path.isfile(path + "index.psp")):
        path += "index.psp"
    if (not os.path.isfile(path)):
        with open(os.getcwd() + "//404.html") as file:
            print("404 not found!")
            self.send_response(200)
            self.wfile.write(file.read())
            self.send_header("Content-type", "text/html")
        return
    req = request("GET", self)
    self.send_response(200)
    self.send_header("Access-Control-Allow-Origin", "*")
    if ("psp" in path.split(".")[1]):
        st = processor.process(path, params, req)
        self.send_header("Content-type", "text/html")
    elif ("jpg" in path.split(".")[1].lower()):
        with open(path, "rb") as file:
            st = file.read()
        self.send_header("Content-type", "image/jpeg")
    elif ("png" in path.split(".")[1].lower()):
        with open(path, "rb") as file:
            st = file.read()
        self.send_header("Content-type", "image/png")
    elif (path == "close" or path == "exit" or path == "stop"):
        raise KeyboardInterrupt()
    else:
        with open(path, "r") as file:
            st = file.read()
        self.send_header("Content-type", "text/html")
    self.end_headers()
    self.wfile.write(st)
def run(self): filename = self.config['raw_file'] self.fd = open(filename) logger.debug("Creating unpickler") self.unpickler = cPickle.Unpickler(self.fd) while True: try: logger.debug("data_collector is waiting for data.") self.data = self.unpickler.load() except EOFError, e: logger.debug("End of file.") self.fd.close() return self.prev = self.data logger.debug("std dev %.2e"%np.std(self.data)) parameters = {"brightness":-00, "contrast":2} self.data = processor.process(self.data, parameters, self.config) logger.debug("Emitting data ready to showing") logger.debug("std dev processed %.2e"%np.std(self.data)) self.emit(QtCore.SIGNAL("data_ready(PyQt_PyObject)"), self.data)
def main():
    options = read_args()
    zc = ZkClient(options.zserver, options.zport)

    try:
        zk_data = process(zc.spouts(options.spoutroot, options.topology))
    except ZkError as e:
        print 'Failed to access Zookeeper: %s' % str(e)
        return 1
    except ProcessorError as e:
        print 'Failed to process: %s' % str(e)
        return 1
    else:
        if options.postjson:
            post_json(options.postjson, zk_data)
        else:
            display(zk_data, true_or_false_option(options.friendly))
    return 0
def test_process_space_after_replaces_spaces_after_symbol_after_cursor(self):
    for symbol in self._space_after:
        self.assertIn((0, 2, ''), process('test' + symbol + '', ' '))
def test_process_ignored_symbols_returns_empty_modifications(self):
    for symbols in self._ignored_symbols:
        actual = process('test' + symbols, '')
        self.assertEqual(0, len(actual))
def test_process_no_space_symbols_removes_space(self):
    for symbols in self._no_space_symbols:
        actual = process('test ' + symbols[0] + ' ' + symbols[1:], '')
        self.assertIn((-1 - len(symbols), 0, symbols), actual)
def test_process_space_symbols_inserts_space(self):
    for symbols in self._space_symbols:
        actual = process('test ' + symbols.replace(' ', ''), '')
        self.assertIn((-len(symbols.replace(' ', '')), 0, symbols), actual)
def test_process_no_space_after_not_insert_spaces_after_symbol(self):
    for symbol in self._no_space_after:
        self.assertNotIn((0, 0, ' '), process('test' + symbol, ' '))
def test_process_space_after_replaces_spaces_after_symbol(self):
    for symbol in self._space_after:
        self.assertIn((-2, 0, ' '), process('test' + symbol + ' ', ''))
def test_process_space_after_not_replaces_spaces_after_symbol_with_char(self):
    for symbol in self._space_after:
        self.assertNotIn((0, 2, ''), process('test' + symbol + ' a', ' '))
def test_process_language_cases(self):
    for value in self._language_cases:
        self.assertEqual(value[3], process(value[0], value[1], value[2]),
                         'token: "' + value[0] + value[1] + '"')
def test_process_extra_cases(self):
    for value in self._extra_cases:
        self.assertEqual(value[2], process(value[0], value[1]),
                         'token: "' + value[0] + value[1] + '"')
def test_process_space_before_inserts_spaces_before_symbol(self):
    for symbol in self._space_before:
        actual = process('test' + symbol, '')
        self.assertIn((-len(symbol), -len(symbol), ' '), actual)
def test_process_space_after_inserts_space_after_symbol_before_char(self):
    for symbol in self._space_after:
        self.assertIn((-1, -1, ' '), process('test' + symbol + 'a', ''))
def test_process_no_space_before_not_removes_indentation(self):
    for symbol in self._no_space_before:
        actual = process(' ' + symbol, '')
        self.assertNotIn((-1 - len(symbol), -len(symbol), ''), actual)
def test_process_no_space_after_removes_spaces_after_symbol(self):
    for symbol in self._no_space_after:
        self.assertIn((0, 1, ''), process('test' + symbol, ' '))
def test_process_space_before_not_inserts_spaces_after_indentation(self):
    for symbol in self._space_before:
        actual = process(symbol, '')
        self.assertNotIn((-len(symbol), -len(symbol), ' '), actual)
def test_process_space_symbols_not_inserts_space(self):
    for symbols in self._space_symbols:
        actual = process('test ' + symbols, '')
        self.assertNotIn((-1, -1, ' '), actual)
def test_process_space_before_not_modify_space_before_symbol(self):
    for symbol in self._space_before:
        actual = process('test ' + symbol, '')
        self.assertNotIn((-1 - len(symbol), -1, ' '), actual)
def test_process_space_after_not_modify_space_after_symbol(self):
    for symbol in self._space_after:
        self.assertNotIn((-1, 0, ' '), process('test' + symbol + ' ', ''))