def compare_summary(base_root, target_root, name=None, comparefn=tuple_compare, show_added=True, show_removed=True, show_identical=True, show_paths=True): base_map = summary_to_map(summary.summarize(base_root, name)) target_map = summary_to_map(summary.summarize(target_root, name)) added, removed, changed, identical = get_key_lists(base_map, target_map, base_root, target_root) # no nonlocal in 2.7 have_output_hack = [False] def header_line(msg): if have_output_hack[0]: print else: have_output_hack[0] = True if msg: print msg if show_paths: header_line(None) print 'base root: ' + base_root print 'target root: ' + target_root if show_added and added: header_line('added') print_keys(added) if show_removed and removed: header_line('removed') print_keys(removed) if changed: header_line('changed') print_changed(changed, base_map, target_map, comparefn) if show_identical and identical: header_line('identical') print_keys(identical)
def get_text():
    """Flask view: summarize pasted text, a stored URL article, or an upload.

    On POST, dispatches on which form field is present ('text', 'url', or
    'placeholder' for file uploads) and renders result.html with the summary,
    compression percentage and top words.  On GET, renders index.html.
    """
    summ = []
    # Fix: ensure `text` is always bound -- previously a POST with none of the
    # known form fields raised NameError at getPercentage(text, summ) below.
    text = ""
    if request.method == 'POST':
        if 'text' in request.form:
            text = request.form.get('text')
            size = int(request.form.get('copy-size'))
            summ = summarize(text, n=size)
        elif 'url' in request.form:
            url = request.form.get('url')
            print("MY URL: " + url)
            # title, text = getArticle(url)
            # Article title/body were fetched earlier and stashed in the session.
            title = session['url-title']
            text = session['url-text']
            size = int(request.form.get('url-size'))
            summ = summarize(text, title, n=size)
        elif 'placeholder' in request.form:
            target = os.path.join(APP_ROOT, "uploads")
            if not os.path.isdir(target):
                os.mkdir(target)
            upload = request.files['file']
            size = int(request.form.get('upload-size'))
            if upload.filename == '':
                flash('No selected file')
                return redirect(request.url)
            if upload and allowed_file(upload.filename):
                # Random prefix avoids collisions between same-named uploads.
                filename = str(getRandomNumber(5)) + "_" + secure_filename(
                    upload.filename)
                print("Filename: " + str(filename))
                # Fix: use os.path.join instead of "\\".join so the path is
                # correct on non-Windows hosts as well.
                destination = os.path.join(target, filename)
                print(destination)
                upload.save(destination)
                session['uploaded'] = destination
                text = getText(destination)
                summ = summarize(text, n=size)
            else:
                flash(
                    "Unsupported file. Please upload only .docx, .pdf or .txt files."
                )
                return redirect(request.url)
        session['summary'] = summ
        percentage = getPercentage(text, summ)
        topWords = getTopWords(text)
        return render_template("result.html",
                               summaries=summ,
                               percentage=percentage,
                               words=topWords)
    print("FAILURE")
    return render_template("index.html")
def text_final(text):
    """Classify *text* and summarize it overall and per sentiment polarity.

    Returns (category, overall_summary, positive_summary, negative_summary).
    """
    category = classification(text)
    overall = summarize(text, 4)

    # Split sentences into positive/negative buckets by their sentiment label.
    pos_parts = []
    neg_parts = []
    for sentence in sent_tokenize(text):
        if text_sentiment(sentence) == 'pos':
            pos_parts.append(sentence)
        else:
            neg_parts.append(sentence)

    # Each sentence is prefixed with a single space, matching the original
    # ``acc = acc + " " + sentence`` accumulation exactly.
    possen = "".join(" " + s for s in pos_parts)
    negsen = "".join(" " + s for s in neg_parts)

    possum = summarize(possen, 2)
    negsum = summarize(negsen, 2)
    return category, overall, possum, negsum
def main():
    """Parse CLI options, switch the interface to monitor mode, sniff, summarize."""
    parser = argparse.ArgumentParser(description="""Sniff for SSIDs and determine their potential physical location. To stop sniffing, send SIGINT to the process.""", epilog="""When using wigle, data is printed out in the following format: {latitude},{longitude}: {country} - {region} - {city} - {postalcode} - {street} - {house number} ({last update}) """)
    parser.add_argument('interface', metavar='INTERFACE', type=str, help="the wifi interface to use (must be capable of monitor mode")
    parser.add_argument('-c', '--censor', dest='censor', action='store_true', help="censor SSIDs, MAC addresses, and any other sensitive information")
    parser.add_argument('-w', '--wigle-auth', dest='auth_token', type=str, help="the auth code used for the wigle api")
    parser.add_argument('-m', '--mac-vendors', dest='mac_vendors', type=str, help="use a MAC to brand mapping to print the devices' brands")
    parser.add_argument('-s', '--summary', dest='summary', help="store all results to this json file")
    args = parser.parse_args()
    # Config appears to be a module-level singleton initialized from the parsed
    # args (read back via Config.interface / Config.summary) -- TODO confirm.
    Config(args)
    try:
        wifi.monitor()
    except AssertionError:
        print("Switching wifi interface {} to monitor mode not possible, exiting".format(Config.interface), file=sys.stderr)
        # NOTE(review): execution continues past this call unless __cleanup__
        # terminates the process -- confirm it exits.
        __cleanup__()
    sniff = Sniff()
    try:
        sniff.start(callback)
    except KeyboardInterrupt:
        # SIGINT is the documented way to stop sniffing; fall through to summary.
        pass
    if Config.summary:
        summary.summarize()
    __cleanup__()
def get_event(uri):
    """Fetch an event by URI and return a dict describing it.

    Input: uri (string representing an Event URI).
    Returns {'event': {...}} on success, or {'error': message} when the URI is
    invalid or the event has been removed from the database.
    """
    q = QueryEvent(
        uri,
        requestedResult=RequestEventInfo(
            returnInfo=ReturnInfo(articleInfo=ArticleInfoFlags(concepts=True),
                                  eventInfo=EventInfoFlags(title=True,
                                                           concepts=True,
                                                           location=True,
                                                           date=True,
                                                           stories=False,
                                                           socialScore=True,
                                                           details=True,
                                                           imageCount=1))))
    data = er.execQuery(q)
    # Check if event errors
    if 'error' in data[uri]:
        return {'error': uri + " is not a valid event URI"}
    if 'newEventUri' in data[uri]:
        return {
            'error':
            'Data for event ' + uri + ' has been removed from the database'
        }
    # Else, build response
    else:
        info = data[uri]['info']
        event_dict = {}
        event_dict['URI'] = info['uri']
        # Category label looks like "path/like/this"; keep the second segment.
        event_dict['category'] = info['categories'][0]['label'].split('/')[1]
        event_dict['date'] = info['eventDate']
        event_dict['image'] = info['images'][0]
        event_dict['keywords'] = [
            concept['label']['eng'] for concept in info['concepts'][:5]
        ]
        event_dict['socialScore'] = info['socialScore']
        event_dict['title'] = info['title']['eng']
        try:
            event_dict['location'] = info['location']['country']['label']['eng']
        except (KeyError, TypeError):
            # Location data is optional/partial in the upstream response.
            event_dict['location'] = "N/A"
        event_dict['articles'] = fetch_article_urls(er, uri)
        # Fix: replace the triply-nested bare try/except pyramid with a loop
        # over the first three articles; first successful summarization wins.
        # (Also handles an empty article list without raising.)
        event_dict['summary'] = 'Summarization error'
        for article_url in event_dict['articles'][:3]:
            try:
                event_dict['summary'] = summary.summarize(article_url)['summary']
                break
            except Exception:
                continue
        return {'event': event_dict}
def _get_summaries(function, *args):
    """Capture object summaries immediately before and after invoking *function*.

    Returns a 2-tuple (summary_before, summary_after).
    """
    before = summary.summarize(get_objects())
    function(*args)
    after = summary.summarize(get_objects())
    return before, after
def create_summary(self):
    """Return a summary of all live objects.

    See also the notes on ignore_self in the class as well as the
    initializer documentation.
    """
    if not self.ignore_self:
        res = summary.summarize(muppy.get_objects())
    else:
        # If the user requested the data required to store summaries to be
        # ignored in the summaries, we need to identify all objects which
        # are related to each summary stored.
        # Thus we build a list of all objects used for summary storage as
        # well as a dictionary which tells us how often an object is
        # referenced by the summaries.
        # During this identification process, more objects are referenced,
        # namely int objects identifying referenced objects as well as the
        # corresponding count.
        # For all these objects it will be checked whether they are
        # referenced from outside the monitor's scope. If not, they will be
        # subtracted from the snapshot summary, otherwise they are
        # included (as this indicates that they are relevant to the
        # application).
        all_of_them = []  # every single object
        ref_counter = {}  # how often it is referenced; (id(o), o) pairs

        def store_info(o):
            # Record o and increment its per-id reference count.
            all_of_them.append(o)
            if id(o) in ref_counter:
                ref_counter[id(o)] += 1
            else:
                ref_counter[id(o)] = 1

        # store infos on every single object related to the summaries
        store_info(self.summaries)
        for k, v in self.summaries.items():
            store_info(k)
            summary._traverse(v, store_info)

        # do the summary
        res = summary.summarize(muppy.get_objects())

        # remove ids stored in the ref_counter
        for _id in ref_counter.keys():
            # referenced in frame, ref_counter, ref_counter.keys()
            if len(gc.get_referrers(_id)) == (3):
                summary._subtract(res, _id)
        for o in all_of_them:
            # referenced in frame, summary, all_of_them
            if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
                summary._subtract(res, o)
    return res
def print_diff(self, ignore=[]): """Print the diff to the last time the state of objects was measured. keyword arguments ignore -- list of objects to ignore """ # ignore this and the caller frame ignore.append(inspect.currentframe()) diff = self.get_diff(ignore) print "Added objects:" summary.print_(summary.summarize(diff['+'])) print "Removed objects:" summary.print_(summary.summarize(diff['-'])) # manual cleanup, see comment above del ignore[:]
def main(mode=MODE, target_text='こんにちは。世界。これはテストのメッセージです。お疲れ様です。'):
    """Summarize *target_text* into keywords, then generate a senryu from them.

    mode -- 'S' (Shimizu models) or 'W' (Watanabe models); any other value
            raises ValueError.
    Returns the generated senryu.
    """
    print('SUMMARY MODE: ', MODE_SUMMARY)
    print('SENRYU MODE:', mode)
    print('TARGET TEXT', target_text)
    words = summarize(target_text, method=MODE_SUMMARY)
    print('CREATED WORDS', words)
    if mode == 'S':
        created_senryu = senryu(words=words,
                                mode=mode,
                                encoder_model=encoder_model_shimizu,
                                decoder_model=decoder_model_shimizu,
                                is_load_weight=True,
                                word2vec_model=word2vec_model,
                                tokenizer=tokenizer,
                                detokenizer=detokenizer)
    elif mode == 'W':
        created_senryu = senryu(words=words,
                                mode=mode,
                                encoder_model=encoder_model_watanabe,
                                decoder_model=decoder_model_watanabe,
                                is_load_weight=True,
                                word2vec_model=word2vec_model,
                                tokenizer=tokenizer,
                                detokenizer=detokenizer)
    else:
        # Fix: any other mode previously fell through and raised
        # UnboundLocalError on created_senryu below; fail clearly instead.
        raise ValueError("unknown senryu mode: {!r} (expected 'S' or 'W')".format(mode))
    print('CREATED SENRYU', created_senryu)
    return created_senryu
def hello():
    """Render the index page; on POST, summarize the submitted text.

    Missing text yields an error message; 'length' defaults to 3 sentences.
    """
    if request.method != "POST":
        return render_template("index.html")
    text = request.form.get("text", None)
    if text is None:
        return render_template("index.html",
                               summary="Error, didn't get text to summarize.")
    length = int(request.form.get("length", 3))  # default length of 3
    result = summarize(request.form.get("text", None), raw=True, length=length)
    return render_template("index.html", summary=result, text=text)
def __init__(self, ignore_self=True):
    """Constructor.

    The number of summaries managed by the tracker has a performance
    impact on new summaries, iff you decide to exclude them from further
    summaries. Therefore it is suggested to use them economically.

    Keyword arguments:
    ignore_self -- summaries managed by this object will be ignored.
    """
    # Baseline summary taken at construction time (before the storage dict
    # below exists), used as the reference point for later diffs.
    self.s0 = summary.summarize(muppy.get_objects())
    # Stored summaries, keyed by caller-supplied key.
    self.summaries = {}
    # Whether tracker-owned storage is excluded from future summaries.
    self.ignore_self = ignore_self
def hello():
    """Index view: POST summarizes the submitted text, GET just shows the form."""
    if request.method == 'POST':
        text = request.form.get('text', None)
        if text is None:
            error_msg = "Error, didn't get text to summarize."
            return render_template('index.html', summary=error_msg)
        # 'length' controls the number of summary sentences; default is 3.
        length = int(request.form.get('length', 3))
        result = summarize(request.form.get('text', None), raw=True, length=length)
        return render_template('index.html', summary=result, text=text)
    return render_template('index.html')
def extract_summary():
    """Summarize the most recent upload (media or text) and flash the result.

    Media files (.mp4/.wav) are first transcribed via extract_text(); other
    uploads are treated as text files directly.
    """
    if len(FLAG) == 1:
        source = upload_file_location[0]
        # Fix: use splitext instead of split('.')[1], which raised IndexError
        # for names without a dot and picked the wrong part for dotted names.
        ext = os.path.splitext(os.path.basename(source))[1].lstrip('.').lower()
        if ext in ("mp4", "wav"):
            text_file = extract_text(source)
        else:
            text_file = source
    else:
        text_file = text_file_location[0]
    ranked_sentences = summarize(text_file)
    s = []
    for i in range(2):
        # NOTE(review): each iteration overwrites s, so only the sentence at
        # index 1 is flashed below -- looks unintentional (flash per sentence?);
        # behavior preserved pending confirmation.
        s = ''.join(ranked_sentences[i][1])
    flash(s)
    return render_template('dashboard.html')
def compute_num_param(model_TAG):
    """Compute parameter counts for the model configuration encoded in model_TAG.

    model_TAG segments 3.. are: cell_name, embedding_size, hidden_size,
    num_layer, sharing_rates, dropout, tied.
    Returns {'all': total params (minus tied embedding), 'rnn': RNN-only params}.
    """
    control_name = model_TAG.split('_')[3:]
    config.PARAM['cell_name'] = control_name[0]
    config.PARAM['embedding_size'] = int(control_name[1])
    config.PARAM['hidden_size'] = int(control_name[2])
    config.PARAM['num_layer'] = int(control_name[3])
    config.PARAM['sharing_rates'] = float(control_name[4])
    config.PARAM['dropout'] = float(control_name[5])
    config.PARAM['tied'] = int(control_name[6])
    # NOTE: eval() builds the model from a config-derived class name;
    # model_name must come from trusted configuration only.
    model = eval('models.{}().to(device)'.format(config.PARAM['model_name']))
    # Renamed locals: the originals shadowed the builtin all() and the
    # summary module name used elsewhere in this file.
    model_summary = summarize(batch_dataset, model)
    total_num_param = model_summary['total_num_param']
    vocab_size = len(config.PARAM['vocab'])
    embedding_size = config.PARAM['embedding_size']
    # With tied weights the output projection shares the input embedding,
    # so those parameters are not counted twice.
    all_param = (total_num_param - vocab_size * embedding_size
                 ) if config.PARAM['tied'] else total_num_param
    # RNN-only count: strip input embedding and output projection (+bias row).
    rnn_param = total_num_param - (vocab_size * embedding_size) - (
        (vocab_size + 1) * embedding_size)
    num_param = {'all': all_param, 'rnn': rnn_param}
    return num_param
def main():
    """Dispatch SDSS workload-analysis subcommands parsed by docopt.

    Subcommands: consume, summarize, explain, extract, analyze, analyze2,
    getmetrics, columnsimilarity.  Several may be given at once; each is
    checked independently below.
    """
    arguments = docopt(__doc__, version='SDSS Tools 0.0.1')
    # Default to a local sqlite database unless -d supplies a connection string.
    db = (('-d' in arguments and arguments['-d'])
          or 'sqlite:///sqlshare-sdss.sqlite')
    if arguments['consume']:
        consume_logs.consume(
            db, arguments['INPUT'], arguments['sdss'], arguments['-v'])
    if arguments['summarize']:
        summary.summarize(db, arguments['sdss'])
    if arguments['explain']:
        # config = {}
        # with open(arguments['CONFIG']) as f:
        #     for line in f:
        #         key, val = line.split('=')
        #         config[key.strip()] = val.strip()
        # Segments default to "segment 0 of 1" unless -s overrides them.
        segments = [0, 1]
        if arguments['-s']:
            segments = [int(arguments['SEGMENT']), int(arguments['NUMBER'])]
        if arguments['sdss']:
            # explain_queries.explain_sdss(config, db, arguments['-q'], segments, arguments['--dry'], arguments['-o'])
            explain_queries.explain_sdss(db, arguments['-q'], segments,
                                         arguments['--dry'], arguments['-o'])
        elif arguments['tpch']:
            # explain_queries.explain_tpch(config, db, arguments['-q'], arguments['--dry'])
            explain_queries.explain_tpch(db, arguments['-q'], arguments['--dry'])
        else:
            if arguments['--second']:
                # explain_queries.explain_sqlshare(config, db, arguments['-q'], False, arguments['--dry'])
                explain_queries.explain_sqlshare(db, arguments['-q'], False,
                                                 arguments['--dry'])
            else:
                # explain_queries.explain_sqlshare(config, db, arguments['-q'], True, arguments['--dry'])
                explain_queries.explain_sqlshare(db, arguments['-q'], True,
                                                 arguments['--dry'])
    if arguments['extract']:
        if arguments['sdss']:
            query_extract.extract_sdss(db)
        elif arguments['tpch']:
            query_extract.extract_tpch(db)
        else:
            query_extract.extract_sqlshare(db)
    if arguments['analyze']:
        if arguments['sdss']:
            query_analysis.analyze_sdss(db, arguments['--recurring'])
        elif arguments['tpch']:
            query_analysis.analyze_tpch(db)
        else:
            query_analysis.analyze_sqlshare(db, False)
    if arguments['analyze2']:
        if arguments['sqlshare']:
            query_analysis2.analyse2(db)
    if arguments['getmetrics']:
        if arguments['sqlshare']:
            get_complexity_metrics.getmetrics(db)
    # if arguments['calcsimilarity']:
    #     if arguments['sqlshare']:
    #         attributesimilarity.calculate(db)
    if arguments['columnsimilarity']:
        if arguments['sqlshare']:
            workloaddistance.calculate(db)
def main(cargs):
    """Entry point of the dude experiment tool.

    Parses options, locates and loads the Dudefile (searching parent
    directories), builds the filtered experiment set, then dispatches the
    subcommand (run, run-once, sum, list, failed, missing, clean, visit,
    info, status).
    """
    # folder from where dude is called
    cfolder = os.getcwd()
    # parse command line
    (options, cargs) = parser.parse_args(cargs)
    # check if a command has been given
    if cargs == []:
        parser.print_help()
        sys.exit()
    # create requires no Dudefile, so we deal with it right here
    if cargs[0] == "create":
        if len(cargs) < 2:
            expgen.create()
        else:
            expgen.create(cargs[1])
        sys.exit(0)
    # all other commands require a Dudefile, so we first load it (in "cfg")
    cfg = None
    # use a given dudefile in options
    if options.expfile != None:
        try:
            cfg = imp.load_source('', options.expfile)
        except IOError:
            print >> sys.stderr, 'ERROR: Loading', options.expfile, 'failed'
            parser.print_help()
            sys.exit(1)
    else:
        # try default file names, walking up the directory tree;
        # NOTE: this chdir's into the directory where a Dudefile is found.
        current = os.getcwd()
        max_folder = 10  # arbitrary number of parent directories
        i = 0
        while i < max_folder:
            for f in ['desc.py', 'dudefile', 'Dudefile', 'dudefile.py']:
                try:
                    if os.path.exists(f) and i > 0:
                        print "Opening Dudefile: ", os.path.abspath(f)
                    # load_source raises IOError if f does not exist
                    cfg = imp.load_source('', f)
                    break
                except IOError:
                    pass
            if cfg != None:
                break
            else:
                i += 1
                parent, last = os.path.split(current)
                os.chdir(parent)
                current = parent
    if cfg == None:
        print >> sys.stderr, 'ERROR: no dudefile found'
        parser.print_help()
        sys.exit(1)
    # add to actual folder as root in cfg
    cfg.root = os.getcwd()
    # check if cfg can be used for core functions
    core.check_cfg(cfg)
    # check if cfg can be used for summaries
    summary.check_cfg(cfg)
    # parse arguments to module
    if options.margs:
        margs = args.parse(";".join(options.margs))
        print "Passing arguments:", margs
        args.set_args(cfg, margs)
    if hasattr(cfg, 'dude_version') and cfg.dude_version >= 3:
        dimensions.update(cfg)
    # collect filters
    filters = []
    if options.filter and options.filter != []:
        for fi in options.filter:
            for f in fi.split(','):
                filters.append(cfg.filters[f])
    if options.filter_inline and options.filter_inline != []:
        filters += filt.create_inline_filter(cfg, options.filter_inline)
    if options.filter_path:
        current = os.getcwd()
        if current != cfolder:
            # this assumes Dudefile is in the root of the experiment folder
            os.chdir(cfolder)
            path = os.path.abspath(options.filter_path)
            os.chdir(current)
            path = os.path.relpath(path)  # get raw_output_dir/exp_... format
        else:
            path = options.filter_path
        filters += filt.filter_path(cfg, path)
    # get experiments
    experiments = core.get_experiments(cfg)
    # select the set of experiments to be considered (successful,
    # failed or pending); all three flags (or none) means: consider all
    if (options.success and options.failed and options.pending) or\
       not (options.success or options.failed or options.pending):
        pass
    else:
        failed, pending = core.get_failed_pending_exp(cfg, experiments)
        expin = []
        expout = []
        if options.failed:
            expin += failed
        else:
            expout += failed
        if options.pending:
            expin += pending
        else:
            expout += pending
        if options.success:
            experiments = [exp for exp in experiments if exp not in expout]
        else:
            experiments = expin
    # apply filters
    if filters != []:
        experiments = filt.filter_experiments(cfg, filters,
                                              options.invert, False,
                                              experiments)
    cmd = cargs[0]
    if cmd == 'run':
        if options.force:
            clean.clean_experiments(cfg, experiments)
        execute.run(cfg, experiments, options)
    elif cmd == 'run-once':
        assert len(experiments) == 1
        optpt = experiments[0]
        folder = "once"
        utils.checkFolder(folder)  # create if necessary
        if options.force:
            clean.clean_experiment(folder)
        execute.execute_isolated(cfg, optpt, folder, options.show_output)
    elif cmd == 'sum':
        summary.summarize(cfg, experiments, cargs[1:],
                          options.backend, options.ignore_status)
    elif cmd == 'list':
        for experiment in experiments:
            if options.dict:
                print "experiment:", experiment
            else:
                print core.get_folder(cfg, experiment)
    elif cmd == 'failed':
        failed = core.get_failed(cfg, experiments, False)
        for ffile in failed:
            print ffile
    elif cmd == 'missing':
        failed = core.get_failed(cfg, experiments, True)
        for exp in failed:
            print exp
    elif cmd == 'clean':
        if options.invalid:
            clean.clean_invalid_experiments(cfg, experiments)
        else:
            # TODO if no filter applied, ask if that's really what the
            # user wants.
            r = 'y'
            if options.filter == None and \
               options.filter_inline == None:
                print "sure to wanna delete everything? [y/N]"
                r = utils.getch()  # raw_input("Skip, quit, or continue? [s/q/c]")
            if r == 'y':
                clean.clean_experiments(cfg, experiments)
    elif cmd == 'visit':
        if len(cargs) < 2:
            print "Specify a bash command after visit"
            sys.exit(1)
        elif len(cargs) > 2:
            print "Surround multi-term bash commands with \"\"."
            print "e.g., \"%s\"" % ' '.join(cargs[1:])
            sys.exit(1)
        visit.visit_cmd_experiments(cfg, experiments, cargs[1])
    elif cmd == 'info':
        info.show_info(cfg, experiments)
    elif cmd == 'status':
        info.print_status(cfg, experiments)
    else:
        print >> sys.stderr, "ERROR: wrong command. %s" % cargs[0]
        parser.print_help()
def main():
    """Dispatch SDSS workload-analysis subcommands parsed by docopt.

    Subcommands: consume, summarize, explain, extract, analyze, analyze2,
    getmetrics, columnsimilarity.  Several may be given at once; each is
    checked independently below.
    """
    arguments = docopt(__doc__, version='SDSS Tools 0.0.1')
    # Default to a local sqlite database unless -d supplies a connection string.
    db = (('-d' in arguments and arguments['-d'])
          or 'sqlite:///sqlshare-sdss.sqlite')
    if arguments['consume']:
        consume_logs.consume(db, arguments['INPUT'], arguments['sdss'],
                             arguments['-v'])
    if arguments['summarize']:
        summary.summarize(db, arguments['sdss'])
    if arguments['explain']:
        # config = {}
        # with open(arguments['CONFIG']) as f:
        #     for line in f:
        #         key, val = line.split('=')
        #         config[key.strip()] = val.strip()
        # Segments default to "segment 0 of 1" unless -s overrides them.
        segments = [0, 1]
        if arguments['-s']:
            segments = [int(arguments['SEGMENT']), int(arguments['NUMBER'])]
        if arguments['sdss']:
            # explain_queries.explain_sdss(config, db, arguments['-q'], segments, arguments['--dry'], arguments['-o'])
            explain_queries.explain_sdss(db, arguments['-q'], segments,
                                         arguments['--dry'], arguments['-o'])
        elif arguments['tpch']:
            # explain_queries.explain_tpch(config, db, arguments['-q'], arguments['--dry'])
            explain_queries.explain_tpch(db, arguments['-q'], arguments['--dry'])
        else:
            if arguments['--second']:
                # explain_queries.explain_sqlshare(config, db, arguments['-q'], False, arguments['--dry'])
                explain_queries.explain_sqlshare(db, arguments['-q'], False,
                                                 arguments['--dry'])
            else:
                # explain_queries.explain_sqlshare(config, db, arguments['-q'], True, arguments['--dry'])
                explain_queries.explain_sqlshare(db, arguments['-q'], True,
                                                 arguments['--dry'])
    if arguments['extract']:
        if arguments['sdss']:
            query_extract.extract_sdss(db)
        elif arguments['tpch']:
            query_extract.extract_tpch(db)
        else:
            query_extract.extract_sqlshare(db)
    if arguments['analyze']:
        if arguments['sdss']:
            query_analysis.analyze_sdss(db, arguments['--recurring'])
        elif arguments['tpch']:
            query_analysis.analyze_tpch(db)
        else:
            query_analysis.analyze_sqlshare(db, False)
    if arguments['analyze2']:
        if arguments['sqlshare']:
            query_analysis2.analyse2(db)
    if arguments['getmetrics']:
        if arguments['sqlshare']:
            get_complexity_metrics.getmetrics(db)
    # if arguments['calcsimilarity']:
    #     if arguments['sqlshare']:
    #         attributesimilarity.calculate(db)
    if arguments['columnsimilarity']:
        if arguments['sqlshare']:
            workloaddistance.calculate(db)
print("Cube: no input") elif cubeVolumeList: print("Cube: ", end="") print(*cubeVolumeList, sep=", ") if not pyramidVolumeList: print("Pyramid: no input") elif pyramidVolumeList: print("Pyramid: ", end="") print(*pyramidVolumeList, sep=", ") if not ellipsoidVolumeList: print("Ellipsoid: no input") elif ellipsoidVolumeList: print("Ellipsoid: ", end="") print(*ellipsoidVolumeList, sep=", ") # gives the information to summary method in summary file sm.summarize(cubeVolumeList, pyramidVolumeList, ellipsoidVolumeList, testNumber) # breaks the loop break # if they chose cube elif shape == "cube" or shape == "c": # prompts user for dimensions a = int(input("what is the length of the cube: ")) # calculates the volume in python file volume and prints the calculated volume after cube_volume = vl.cube_volume(a) print("The volume of a cube with length ", a, " is: ", cube_volume) # adds the volume to list of volumes cubeVolumeList.append(cube_volume) # if they choose pyramid elif shape == "pyramid" or shape == "p": # prompts user for dimensions b = int(input("what is the base of the pyramid: "))
def biff(diff):
    """Summarize *diff* and dispatch the summary as a notification."""
    notify(summarize(diff))