Code Example #1
File: main.py Project: mshariqa/programming_project
def menu2(companydata, companylist, comindex):
    utils.cls()

    print("=" * 26, "You selected", "=" * 26)
    print((companylist.iloc[int(comindex)]))
    loop = True
    while loop:
        print("\n" + "=" * 25 + "Stock Options" + "=" * 25)
        print("\nSelect options (1-6):")
        print("1. Summary")
        print("2. Graph")
        print("3. Historical Data")
        print("4. Future Prediction")
        print("5. Go back to previous menu")
        print("6. Exit")
        option = input(
            "\nYour option:                                ").lstrip()
        if option == '1':
            summary(companydata)
        elif option == '2':
            graph(companylist, comindex)
        elif option == '3':
            historicaldata(companydata)
        elif option == '4':
            prediction(companydata)
        elif option == '5':
            menu()
        elif option == '6':
            print("\nThanks for coming. Please visit again ...\n")
            loop = False
            exit()
        else:
            print("\nWrong option. Try again ...")
Code Example #2
File: main.py Project: alzayats/LCFCN
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('-e','--exp_name', default="trancos")
  parser.add_argument('-m','--mode', default="summary")
  args = parser.parse_args()

  dataset_name, model_name, metric_name = ut.get_experiment(args.exp_name)

  # Paths
  name = "{}_{}".format(dataset_name, model_name)
  
  path_model = "checkpoints/model_{}.pth".format(name)
  path_opt = "checkpoints/opt_{}.pth".format(name)
  path_best_model = "checkpoints/best_model_{}.pth".format(name)
  path_history = "checkpoints/history_{}.json".format(name)


  if args.mode == "train":
    train.train(dataset_name, model_name, metric_name, path_history, path_model, path_opt, path_best_model,)
  
  if args.mode == "test":
    test.test(dataset_name, model_name, metric_name, path_history, path_best_model)

  if args.mode == "summary":      
    summary.summary(dataset_name, model_name, path_history)
Code Example #3
def mainmenu():
    print("~~ Swin Virtual Run ~~\n")
    print("[1] Single Registration")
    print("[2] Couple Registration")
    print("[3] Group Registration")
    print("[4] Calculate and Quit\n\n")
    print("[0] Summary\n")
    choice = input("Choice:")
    if int(choice) == 1:
        ticket = input("How many single ticket?")
        print("Registration for " + str(ticket) + " runner is successful")
        calculatepurchase.cal1(ticket)
        print("")
        menu.mainmenu()
    elif int(choice) == 2:
        couple = input("How many couple?")
        print("Registration for " + str(couple) + " couple is successful")
        calculatepurchase.cal2(couple)
        print("")
        menu.mainmenu()
    elif int(choice) == 3:
        menu.groupmenu()
        print("")
        menu.mainmenu()
    elif int(choice) == 4:
        displaypurchase.displaybuy()
    elif int(choice) == 0:
        summary.summary()
        menu.mainmenu()
    else:
        print("Please select correct choice menu !!!")
        menu.mainmenu()
Code Example #4
File: RUN.py Project: daxborde/speech-summ
def run_me(filename):
    print("Begin upload:")
    sr.upload(filename)
    print("Upload finished. Begin transcription:")
    sr.transcribe_gcs(sr.blob_q.get(), "transcription.txt")
    print("Transcription finished. Begin summary:")
    sum.summary("transcription.txt")
    print("Summary finished. Begin keywords:")
    sum.get_keywords("transcription.txt")
Code Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--exp_name', default="acacia")
    parser.add_argument('-m', '--mode', default=None)
    parser.add_argument('-image_path', '--image_path', default=None)
    parser.add_argument('-model_path', '--model_path', default=None)
    parser.add_argument('-model_name', '--model_name', default=None)
    parser.add_argument(
        '-r',
        '--reset',
        action="store_const",
        const=True,
        default=False,
        help="If set, a new model will be created, overwriting any previous version.")

    args = parser.parse_args()

    dataset_name, model_name, metric_name = experiments.get_experiment(
        args.exp_name)

    # Paths
    name = "{}_{}".format(dataset_name, model_name)

    # Train checkpoint file
    path_model = "checkpoints/best_model_{}.pth".format(name)
    path_opt = "checkpoints/opt_{}.pth".format(name)
    path_best_model = "checkpoints/best_model_save_{}.pth".format(name)
    path_history = "checkpoints/history_{}.json".format(name)


    if args.image_path is not None:
        applyOnImage.apply(args.image_path, args.model_name, args.model_path)

    elif args.mode == "train":
        train.train(
            dataset_name,
            model_name,
            metric_name,
            path_history,
            path_model,
            path_opt,
            path_best_model,
            args.reset)

    elif args.mode == "test":
        test.test(
            dataset_name,
            model_name,
            metric_name,
            path_history,
            path_model)

    elif args.mode == "summary":
        summary.summary(dataset_name, model_name, path_history)
Code Example #6
def get_profile_tweets(handle, filename):
    profile = query_tweets_from_user(handle, limit=10)
    print('Loading...')
    with open(filename, "w", encoding="utf-8") as output:
        json.dump(profile, output, cls=JSONEncoder)
    profile_dataframe = pd.read_json(filename, encoding='utf-8')

    lstt = []
    for i, val in enumerate(profile_dataframe['links']):
        if str(val) == '[]':
            lstt.append('[]')
        elif str(val[0][0:17]) == 'https://youtu.be/':
            pass
        else:
            summ = summary(profile_dataframe['links'][i][0])
            lstt.append(summ)
    profile_dataframe['summary'] = pd.DataFrame(lstt)

    is_summary = []
    for i, idx in enumerate(profile_dataframe['summary']):
        if len(str(idx)) > 3:
            is_summary.append(1)
        else:
            is_summary.append(0)

    profile_dataframe['is_summary'] = pd.DataFrame(is_summary)

    profile_dataframe.to_csv(filename[:-5] + ".csv")
    print('Loaded')
Code Example #7
def get_bot_response(input_text):
    global MedicalFlag, RetrieveRecordsFlag, UpdateRecordsFlag, NoRecords

    if UpdateRecordsFlag:
        UpdateRecordsFlag = False
        try:
            value = float(input_text)
            if value < 0.0 or value > 20:
                raise ValueError
            elif value >= 11.0 or value <= 2.8:
                ReminderFlag = True
                return "I take down liao. You are at HIGH risk, alamak, you need to go see doctor now!"
            elif value > 6.0 or value < 4.0:
                ReminderFlag = True
                return "I take down liao. You are at MEDIUM risk, quite concern, go see doctor soon."
            else:
                return "I take down liao. You are at NO risk. Healthy sia!"
        except ValueError:
            return "I dunno what you're talking about, please enter a valid sugar level."

    input_text = preprocessing(input_text)
    response = agent.query(input_text)

    result = response['result']

    fulfillment = result['fulfillment']

    if fulfillment['speech'] == "Tell me your sugar level":
        UpdateRecordsFlag = True
    elif fulfillment['speech'] == "Retrieving records now ah":
        RetrieveRecordsFlag = True
    elif fulfillment['speech'] == "There you go!":
        return summary(input_text)
    return fulfillment['speech']
Code Example #8
def main():
	import ff_pruning
	import re
	import summary
	pname=request.forms.get('phone')
	pname=re.sub('-','_',pname)
	s_v=summary.summary(pname+'_features')
	s_v=ff_pruning.ff_pruning(s_v)
	return template('summary_display',s_v=s_v)
Code Example #9
    def getSummary(self, url):
        sm = summary()
        if url.find('pdf') > 0:
            d = sm.readPdf(url, 15)
        elif url.find('youtube') > 0:
            d = sm.getYouTubeContext(url, 15)
        else:
            d = sm.getSummaryFromUrl(url, 15)
        return d
Code Example #10
File: run.py Project: mtanneau/cblib-base
def run(solver, probfile, solfile, paramfile, printer, callback):

  ss = None
  try:
    timebefore = time.time()
    for (i,pp) in enumerate(CBFdata(probfile).iterator()):
      if printer:
        printer('File read: %.2f seconds' % (time.time() - timebefore))

      timebefore = time.time()
      solver.read(probfile, paramfile, pp)
      if printer:
        printer('Solver read: %.2f seconds' % (time.time() - timebefore))

      pptime = Timer(solver.optimize).timeit(number=1)
      if printer:
        solver.report()

      # Write to file
      ppsol = solver.getsolution()

      if i == 0:
        if os.path.dirname(solfile) and not os.path.exists(os.path.dirname(solfile)):
          os.makedirs(os.path.dirname(solfile))
        ss = open(solfile, 'w')
      else:
        ss.write('CHANGE\n\n')

      ppsol.printsol( lambda x: ss.write(str(x) + '\n') )

      # Write to printer
      if printer:
        printer('\n' + pp.name + '[' + str(i) + ']: ' + str(pptime) + 's ' + solfile)
        summary(pp, ppsol, printer)

      if callback:
        callback(CBFrunstat(pp, i, solver, ppsol, pptime))

      timebefore = time.time()

  finally:
    if ss is not None:
      ss.close()
Code Example #11
File: gmail.py Project: DeepSE/gmail-summary
def sendmail(to, title, body, inreply="new"):
    sum_body =  summary(body)
    message = "Subject: " + title + \
        "\nIn-Reply-To: " + inreply + \
        "\nFrom: Auto Summary <*****@*****.**>" + \
        "\n\n" + sum_body + "\n---\n" + body

    with smtplib.SMTP_SSL("smtp.gmail.com", context=ssl_context) as server:
        server.login(id, password)
        server.sendmail(id, to, message.encode("utf8"))
Code Example #12
	def loadXml(self,node):
		self.getText(node.childNodes)
		if node.nodeType!=Node.ELEMENT_NODE:
			for n in node.childNodes:
				if n.nodeType==Node.ELEMENT_NODE and n.localName == 'xdl_resource_report':
					self.loadXml(n)
		else:
			for n in node.childNodes:
				if n.nodeType==Node.ELEMENT_NODE and n.localName == 'tiles':
					el=tiles()
					el.loadXml(n)
					self.set_tiles(el)
		
			for n in node.childNodes:
				if n.nodeType==Node.ELEMENT_NODE and n.localName == 'primitive_defs':
					el=primitive_defs()
					el.loadXml(n)
					self.set_primitive_defs(el)
		
			for n in node.childNodes:
				if n.nodeType==Node.ELEMENT_NODE and n.localName == 'summary':
					el=summary()
					el.loadXml(n)
					self.set_summary(el)
		
			if node.hasAttributes():
				attrs=node.attributes
				attrId='a1'
				if attrId in attrs.keys():
					self.a1=str(attrs[attrId].value)
		
				attrId='a0'
				if attrId in attrs.keys():
					self.a0=str(attrs[attrId].value)
		
				attrId='a2'
				if attrId in attrs.keys():
					self.a2=str(attrs[attrId].value)
Code Example #13
def main():
	import ff_pruning
	db=MySQLdb.connect('localhost','root','root','reviews1')
	cur=db.cursor()
	pname=request.forms.get('phone')
	pname=re.sub('-','_',pname)
	query='select * from '+pname
	try:
		cur.execute(query)
	except:
		db.close()
		return 'DataBase Error'
	if cur.rowcount==0:
		db.close()
		return 'No Reviews Found yet'
	rows=cur.fetchall()
	ff=open(pname+'_features','w')
	for r in rows:
		feature_list=feature_ext.feature(r[1],pname,cur,ff)
	ff.close()
	s_v=summary.summary(pname+'_features')
	s_v=ff_pruning.ff_pruning(s_v)
	db.close()
	return template('summary_display',s_v=s_v)
Code Example #14
    def run(self):
        with self.input().open() as f:
            report = summary.summary(f)

        with self.output().open('w') as f:
            f.write(report)
Code Example #15
File: model0.py Project: tracholar/alibaba2015
	f.close()
	return clf	
	
if __name__ == '__main__':
	
	X, Y = GetData()

	parms = {
	'C':np.logspace(-6,0,10),
	#'class_weight':[{0:1,1:200}] #[{0:1,1:50},{0:1,1:70},{0:1,1:85},{0:1,1:100},{0:1,1:120},{0:1,1:150}]
	}
	lr = LogisticRegression()
	clf = GridSearchCV(lr, parms, scoring='f1', n_jobs=10)

	clf.fit(X,Y)
	
	import pickle
	f = open('model0.model','wb')
	pickle.dump(clf, f)
	f.close()
	
	pred = clf.predict(X)
	
	from summary import summary,clf_summary,TestModel
	clf_summary(clf)
	summary(Y, pred)
	TestModel('model0')
	
	

Code Example #16
from feature_format import featureFormat, targetFeatureSplit
from summary import summary
from new_features import addFractionFeature
from tester import test_classifier, dump_classifier_and_data

### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
#features_list = ['poi', 'deferral_payments', 'total_payments', 'loan_advances', 'restricted_stock_deferred', 'deferred_income', 'total_stock_value', 'expenses', 'exercised_stock_options', 'other', 'long_term_incentive', 'restricted_stock', 'director_fees',  'fraction_from_poi', 'fraction_to_poi', 'shared_receipt_with_poi', 'fraction_bonus_salary'] # You will need to use more features
features_list = ['poi', 'total_payments', 'exercised_stock_options', 'other', 'fraction_to_poi', 'shared_receipt_with_poi', 'expenses', 'fraction_from_poi'] # You will need to use more features

### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "r") as data_file:
    data_dict = pickle.load(data_file)
    
summary(data_dict)

### Task 2: Remove outliers

data_dict.pop( 'TOTAL', 0 )

### Task 3: Create new feature(s)
### Store to my_dataset for easy export below.
addFractionFeature(data_dict, "fraction_from_poi", "from_poi_to_this_person", "to_messages")
addFractionFeature(data_dict, "fraction_to_poi", "from_this_person_to_poi", "from_messages")
addFractionFeature(data_dict, "fraction_bonus_salary", "bonus", "salary")
my_dataset = data_dict

### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
Code Example #17
def get_results():
    return render_template("results.html",
                           results=summary.summary(request.args["name"]))
Code Example #18
from huffman_coding import huffman_coding
from summary import summary
import torch
import numpy as np
from prune import prune

import copy

device = 'cuda' if torch.cuda.is_available() else 'cpu'

net = VGG16_half()
net = net.to(device)

net.load_state_dict(torch.load("net_after_pruning.pt"))
acc = test(net)

while acc > 0.9:
    # Load the best weight parameters
    net.load_state_dict(torch.load("net_after_pruning.pt"))
    test(net)

    # Test accuracy before fine-tuning
    prune(net, method='std', q=45.0, s=1.25)
    test(net)

    finetune_after_prune(net, epochs=50, batch_size=128, lr=0.001, reg=5e-4)
    net.load_state_dict(torch.load("net_after_pruning.pt"))
    acc = test(net)
    spar = summary(net)
    torch.save(net.state_dict(), "net_after_pruning%.2f_%.2f.pt" % (acc, spar))
Code Example #19
File: views.py Project: ZhanruiLiang/labsite_dj15
def show_summary(request):
    heads, data = summary.summary()
    return make_response(request, 'summary.html', {
        'heads': heads, 'data': data,
        })
Code Example #20
def main(url):
    string = Web2String.url2string(url)
    print(string)
    entity = company_identifier.entity(string)
    print(entity)
    newsTitles, newsContent, newsSources, other_articles_URL = WebCrawler.getInfo(
        entity)  # array of article urls

    # main URL summary
    main_summary = summary.summary(url)
    print(main_summary)

    print(newsTitles)
    # print(string)
    main_article_sentiment = round(evaluate_NN.evaluate_NN(string) *
                                   100)  # analyze sentiment of main article
    print(main_article_sentiment)
    # arrays that will hold similar/different articles and their sentiments
    other_article_sentiment = list()
    #
    for x in range(len(other_articles_URL)):
        curr_article = Web2String.url2string(other_articles_URL[x])
        # analyze sentiment of article and put in array
        other_article_sentiment.append(
            round(evaluate_NN.evaluate_NN(curr_article) * 100))

    a_summary = list()
    for x in range(len(other_articles_URL)):
        a_summary.append(summary.summary(other_articles_URL[x]))

    stock_dates, stock_data = StockToPython.stock_to_JSON(entity)

    other_articles_titles_dict = {
        'other_articles_titles': {
            'one': newsTitles[0],
            'two': newsTitles[1],
            'three': newsTitles[2],
            'four': newsTitles[3],
            'five': newsTitles[4]
        }
    }
    other_articles_sources_dict = {
        'other_articles_sources': {
            'one': newsSources[0],
            'two': newsSources[1],
            'three': newsSources[2],
            'four': newsSources[3],
            'five': newsSources[4]
        }
    }
    other_articles_sentiment_dict = {
        'other_articles_sentiment': {
            'one': other_article_sentiment[0],
            'two': other_article_sentiment[1],
            'three': other_article_sentiment[2],
            'four': other_article_sentiment[3],
            'five': other_article_sentiment[4]
        }
    }
    other_articles_links_dict = {
        'other_articles_links': {
            'one': other_articles_URL[0],
            'two': other_articles_URL[1],
            'three': other_articles_URL[2],
            'four': other_articles_URL[3],
            'five': other_articles_URL[4]
        }
    }
    article_summary = {
        'article_summary': {
            'one': a_summary[0],
            'two': a_summary[1],
            'three': a_summary[2],
            'four': a_summary[3],
            'five': a_summary[4]
        }
    }
    main_article_sentiment_dict = {
        'main_article_sentiment': main_article_sentiment
    }
    entity_dict = {'company_name': entity}
    main_article_summary = {'main_summary': main_summary}
    stock_dates_dict = {'stock_dates': stock_dates}
    stock_data_dict = {'stock_data': stock_data}

    rv = {}

    rv.update(other_articles_titles_dict)
    rv.update(other_articles_sources_dict)
    rv.update(other_articles_sentiment_dict)
    rv.update(other_articles_links_dict)
    rv.update(main_article_sentiment_dict)
    rv.update(entity_dict)
    rv.update(article_summary)
    rv.update(main_article_summary)
    rv.update(stock_dates_dict)
    rv.update(stock_data_dict)

    rv_json = json.dumps(rv)
    print(rv_json)
    return rv_json


# main('https://www.cnet.com/news/apples-q3-earnings-are-all-about-the-iphone-11-hints/')
Code Example #21
File: app.py Project: DeepSE/gmail-summary
def hello():
    content = request.json
    print(content['txt'])
    return summary(content['txt'])
Code Example #22
File: root.py Project: poyhsiao/lzweb
	def config_web(cfg={}):
		# override Cherrypy's default session behaviour

		application_conf = {
			'/' : {
				'tools.staticdir.root': _curdir,
				'tools.staticdir.on' : True,
				'tools.staticdir.dir' : ".",
				'tools.sessions.on'  : True,
				'tools.sessions.storage_type' : "file",
				'tools.sessions.storage_path' : "/tmp/",
				'tools.sessions.timeout' : 60,
				'tools.sessions.locking' : 'explicit',
				'tools.auth.on': True,
				'tools.encode.on' : True,
				'tools.encode.encoding': "utf-8"
			},
			'/favicon.ico' : {
				'tools.staticfile.on' : True,
				'tools.staticfile.filename' : os.path.join(_curdir,'/image/favicon.ico')
			},
			'/css' : {
				'tools.staticdir.on' : True,
				'tools.staticdir.dir' : "css"
			},
			'/script' : {
				'tools.staticdir.on' : True,
				'tools.staticdir.dir' : "script"
			},
			'/image' : {
				'tools.staticdir.on' : True,
				'tools.staticdir.dir' : "image"
			},
		}

		#No Root controller as we provided all our own.
		#cherrypy.tree.mount(root=None, config=config)
		root = Login()
		root.logout = AuthController().logout
		root.xteralink = Interface()
		root.system = summary()
		root.system.summary = summary()
		root.system.dns = dns()
		root.system.network_header = network_header()
		root.system.network = network()
		root.system.wan_detection = wan_detection()
		root.system.fqdn = fqdn()
		root.system.ip_group = ip_group()
		root.system.service_group = service_group()
		root.system.diagnostic_tools = diagnostic_tools()
		root.system.arp_table = arp_table()
		root.system.date_time = date_time()
		root.system.ddns = ddns()
		root.system.administration = administration()
		root.service = dhcp_lan()
		root.service.dhcp_lan = dhcp_lan()
		root.service.dhcp_dmz = dhcp_dmz()
		root.service.virtual_server = virtual_server()
		root.service.firewall = firewall()
		root.service.connection_limit = connection_limit()
		root.service.auto_routing = auto_routing()
		root.service.nat = nat()
		root.service.snmp = snmp()
		root.statistics = stat_bandwidth_utilization()
		root.statistics.stat_bandwidth_utilization = stat_bandwidth_utilization()
		root.statistics.stat_wan_detection = stat_wan_detection()
		root.statistics.stat_dhcp_lan = stat_dhcp_lan()
		root.statistics.stat_dhcp_dmz = stat_dhcp_dmz()
		root.statistics.stat_fqdn = stat_fqdn()
		root.log = view();
		root.log.view = view();
		root.log.syslog = syslog();
		cherrypy.tree.mount(root, "/", config=application_conf)

		cherrypy.server.unsubscribe()
		server1 = cherrypy._cpserver.Server()
		server1.socket_port = 443
		server1._socket_host = '0.0.0.0'
		server1.ssl_certificate = '/usr/local/conf/server.crt'
		server1.ssl_private_key = '/usr/local/conf/server.key'
		server1.subscribe()

		server2 = cherrypy._cpserver.Server()
		server2.socket_port = 80
		server2._socket_host = "0.0.0.0"
		server2.subscribe()
Code Example #23
    else:
        print("Dataset {} not suported!".format(args.dataset))
        sys.exit(0)
else:
    #if args.dataset == "ImageNet":
    #    if not torch.distributed.is_initialized():
    #        port = np.random.randint(10000, 65536)
    #        torch.distributed.init_process_group(backend='nccl', init_method='tcp://127.0.0.1:%d'%port, rank=0, world_size=1)
    #    model = torch.nn.parallel.DistributedDataParallel(model)

    model.load_state_dict(torch.load(args.path, map_location=device), )
    #helpers.load_state_dict(args.path)
    print("Model loaded from {}".format(args.path))

print("-----Summary before pruning-----")
summary(model)
print("-------------------------------")

pickle.dump(model, open("foo.pkl", "wb"))

if not args.prune:
    print("Option to prune and finetune not chosen. Exiting")
    sys.exit(0)

# --------------------------------------- #
# --- Pruning and finetune -------------- #
# --------------------------------------- #

# Test accuracy before fine-tuning
prune(model, method=args.prune_type, q=args.q)
if args.dataset == "CIFAR10":
Code Example #24
    # output.append(plot_300.history['loss'])
    # output.append(plot_300.history['acc'])
    # output.append(plot_300.history['val_loss'])
    # output.append(plot_300.history['val_acc'])
    #
    # # Shallow
    # model_shallow = build_shallow_net(dropout=0.5)
    # plot_shallow = train_net(model_shallow, epochs, data, test)
    # output.append(plot_shallow.history['loss'])
    # output.append(plot_shallow.history['acc'])
    # output.append(plot_shallow.history['val_loss'])
    # output.append(plot_shallow.history['val_acc'])

    # # Deep
    # model_deep = build_deep_net(dropout=0.5)
    # plot_deep = train_net(model_deep, epochs, data, test,
    #                       h5_path="cifar100_deep.h5")
    # output.append(plot_deep.history['loss'])
    # output.append(plot_deep.history['acc'])
    # output.append(plot_deep.history['val_loss'])
    # output.append(plot_deep.history['val_acc'])

    # with open('output.csv', 'w') as f:
    #     writer = csv.writer(f)
    #     for l in output:
    #         writer.writerow(l)


model_best = build_net(dropout=0.5)
summary(model_best)
compare(50)
Code Example #25
                                          stride=2)
        self.br6 = BoundaryRefinement(inchannels=21, channels=21)

    def forward(self, x):
        x = self.conv0(x)

        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)

        x4 = self.path4(x4)
        x3 = self.path3(x3, x4)
        x2 = self.path2(x2, x3)
        x1 = self.path1(x1, x2)

        out = self.br5(x1)
        out = self.deconv5(out)
        out = self.br6(out)

        return out


model = LKM(resnet_extractor="resnet152")

# from torchvision.models.resnet import resnet152
#
# model = resnet152(pretrained=True)
#
summary((3, 256, 256), model)
Code Example #26
File: train.py Project: SunYanCN/TT-Transformer
def main():
    ''' Main function '''
    parser = argparse.ArgumentParser()

    parser.add_argument('-data', required=True)

    parser.add_argument('-epoch', type=int, default=10)
    parser.add_argument('-batch_size', type=int, default=64)

    #parser.add_argument('-d_word_vec', type=int, default=512)
    parser.add_argument('-d_model', type=int, default=512)
    parser.add_argument('-d_inner_hid', type=int, default=2048)
    parser.add_argument('-d_k', type=int, default=64)
    parser.add_argument('-d_v', type=int, default=64)

    parser.add_argument('-n_head', type=int, default=8)
    parser.add_argument('-n_layers', type=int, default=6)
    parser.add_argument('-n_warmup_steps', type=int, default=4000)

    parser.add_argument('-dropout', type=float, default=0.1)
    parser.add_argument('-embs_share_weight', action='store_true')
    parser.add_argument('-proj_share_weight', action='store_true')

    parser.add_argument('-log', default=None)
    parser.add_argument('-save_model', default=None)
    parser.add_argument('-save_mode',
                        type=str,
                        choices=['all', 'best'],
                        default='best')

    parser.add_argument('-no_cuda', action='store_true')
    parser.add_argument('-label_smoothing', action='store_true')
    parser.add_argument('-seed', type=int, default=None)
    parser.add_argument(
        '-use_TT',
        nargs='+',
        choices=[Constants.embedding_, Constants.pff_, Constants.attention_])
    parser.add_argument('-n_tt_cores', nargs='+', type=int, default=3)
    parser.add_argument('-tt_rank', nargs='+', type=int, default=8)

    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda
    opt.d_word_vec = opt.d_model

    # Parse TT Arguments
    opt.tt_params = {}
    if opt.use_TT:
        assert len(opt.use_TT) == len(
            opt.n_tt_cores
        ), f"Specify the number of TT-cores for each of the {opt.use_TT}"
        assert len(opt.use_TT) == len(
            opt.tt_rank
        ), f"Specify the number of TT-rank for each of the {opt.use_TT}"
        for i in range(len(opt.use_TT)):
            opt.tt_params[opt.use_TT[i]] = {
                "n_tt_cores": opt.n_tt_cores[i],
                "tt_rank": opt.tt_rank[i]
            }

    if opt.seed is not None:
        torch.random.manual_seed(opt.seed)

    #========= Loading Dataset =========#
    data = torch.load(opt.data)
    opt.max_token_seq_len = data['settings'].max_token_seq_len

    training_data, validation_data = prepare_dataloaders(data, opt)

    opt.src_vocab_size = training_data.dataset.src_vocab_size
    opt.tgt_vocab_size = training_data.dataset.tgt_vocab_size

    #========= Preparing Model =========#
    if opt.embs_share_weight:
        assert training_data.dataset.src_word2idx == training_data.dataset.tgt_word2idx, \
            'The src/tgt word2idx table are different but asked to share word embedding.'

    device = torch.device('cuda' if opt.cuda else 'cpu')

    # Print the model architecture and hyperparameters
    f = io.StringIO()
    with redirect_stdout(f):
        print(opt)
        transformer = Transformer(
            opt.src_vocab_size,
            opt.tgt_vocab_size,
            opt.max_token_seq_len,
            tgt_emb_prj_weight_sharing=opt.proj_share_weight,
            emb_src_tgt_weight_sharing=opt.embs_share_weight,
            d_k=opt.d_k,
            d_v=opt.d_v,
            d_model=opt.d_model,
            d_word_vec=opt.d_word_vec,
            d_inner=opt.d_inner_hid,
            n_layers=opt.n_layers,
            n_head=opt.n_head,
            dropout=opt.dropout,
            tt_params=opt.tt_params).to(device)

        optimizer = ScheduledOptim(
            optim.Adam(filter(lambda x: x.requires_grad,
                              transformer.parameters()),
                       betas=(0.9, 0.98),
                       eps=1e-09), opt.d_model, opt.n_warmup_steps)
        print(
            f"Number of trainable parameters: {sum(p.numel() for p in transformer.parameters() if p.requires_grad)}"
        )
        summary(transformer, [[opt.max_token_seq_len] for i in range(4)],
                dtype="long")
    architecture_summary = f.getvalue()
    print(architecture_summary)
    if opt.log:
        log_architecture_file = opt.log + '.architecture.log'
        with open(log_architecture_file, 'w') as log_a:
            log_a.write(architecture_summary)

    train(transformer, training_data, validation_data, optimizer, device, opt)
Code Example #27
File: main.py Project: pazur/final
#! /usr/bin/python

import sys

from summary import summary

if __name__ == '__main__':
    try:
        import settings
    except Exception as e:
        print e
        sys.exit(1)
    results = []
    for number, (module_name, arguments, extra) in enumerate(settings.settings.PIPELINE):
        kwargs = {}
        for (source_number, old_arg), new_arg in arguments.iteritems():
            kwargs[new_arg] = results[source_number][old_arg]
        kwargs.update(extra)
        print "running step %d: %s" % (number, module_name)
        results.append(settings.modules[module_name].run(**kwargs))
    summary(results)
Code Example #28
def print_model(net, batch_size):
    aux.printc("blue", disable=False)
    summary(net, (3, 32, 32), batch_size)
    aux.printc(disable=True)
Code Example #29
File: test.py Project: timlee0212/ECE590.10-Lab3
import torch
import numpy as np
from prune import prune

import copy

device = 'cuda' if torch.cuda.is_available() else 'cpu'

net = VGG16_half()
net = net.to(device)

# Load the best weight parameters
net.load_state_dict(torch.load("net_before_pruning.pt"))
test(net)
#
print("-----Summary before pruning-----")
summary(net)
print("-------------------------------")
#
# ### Pruning & Finetune with pruned connections
# # Test accuracy before fine-tuning
#
prune(net, method='std', q=0.45, s=0.75)
#
# print("-----Summary after pruning-----")
summary(net)
# print("-------------------------------")
#

Code Example #30
# %%
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()

from raw_data import raw_data
from summary import summary
import select_data

# %%
if __name__ == '__main__':
    raw_data = raw_data(spark)

    filtered_data = select_data.select_data_between_days(
        raw_data, '2020-04-01', '2020-04-30')
    print(summary(filtered_data))
Code Example #31
File: cifar_print_dims.py Project: icmlanon/BP
                                  bs=128).normalize(stats)
"""Select model here:"""
#data = ImageDataBunch.from_folder(path)
#model = models.resnet18#resnet18(num_classes=10)#alexnet(num_classes=10)
#model = resnet18(num_classes=10)
#model = ResNet18(num_classes=10)
#model = models.xresnet18(num_classes=10)
#model = alexnet(num_classes=10)
#model = quant_alexnet(num_classes=10)
#model = quant_alexnet_bin(num_classes=10)

#model = resnet18(num_classes=10)
#model = ResNet18()
model = Quant_ResNet18()

loss_func = Quant_Loss(model=model, reg_strength=0.01)
#loss_func = nn.CrossEntropyLoss()
learn = Learner(data, model.cuda(), metrics=[accuracy], loss_func=loss_func)
learn.callback_fns += [
    partial(TrackEpochCallback),
    partial(SaveModelCallback, every='epoch', name='model')
]

summary(model, (3, 32, 32))
#print_model_mod(model)
#learn.fit_one_cycle(300)
#learn.fit(500)
print_model_dims(model)
#learn.save('alex')
#learn.fit_one_cycle(35, 3e-3, wd=0.4)
Code Example #32
from summary import summary

if __name__ == '__main__':
    print(summary('s03t05_load_json_test.json', 'visibility'))
    print(summary('s03t05_load_json_test.json', 'purpose'))
    print(summary('s03t05_load_json_test.json', 'no items with this key'))
    print(summary('s03t05_load_json_invalid.json', 'purpose'))
Code Example #33
MODEL_FILENAME = 'breakout' + '_' + 'DQN' + '_'
# our graphing function
#summary sets the ranges and targets and saves the graph
graph = summary(
    summary_types=[
        'sumiz_step', 'sumiz_time', 'sumiz_reward', 'sumiz_epsilon'
    ],
    # the optimal step count of the optimal policy
    step_goal=0,
    # the maximum reward for the optimal policy
    reward_goal=0,
    # maximum exploitation value
    epsilon_goal=0.99,
    # desired name for file
    NAME=MODEL_FILENAME + str(now),
    # file path to save graph. i.e "/Desktop/Py/Scenario_Comparasion/Maze/Model/"
    # SAVE_PATH = "/github/Gym-T4-Testbed/Gym-T4-Testbed/temp_Graphs/",
    SAVE_PATH="/Code-Comparison/DQN/Graphs/",
    # episode upper bound for graph
    EPISODE_MAX=int(args.episodes),
    # step upper bound for graph
    STEP_MAX_M=1000,
    # time upper bound for graph
    TIME_MAX_M=50,
    # reward upper bound for graph
    REWARD_MIN_M=0,
    # reward lower bound for graph
    REWARD_MAX_M=10)


class dqnagent():
Code Example #34
File: main.py Project: Nadineioes/acti.monash.final
    if error != "None":
        raise MyError
    import Validate
    error = open("crashes.txt").readline()
    if error != "None":
        raise MyError
    if open("Mode.txt").readline() == "Light":
        import LightData

    # writes to "sleepindex.csv" with a copy of data from "sleepfile.csv"
    # adds the columns "sleepindex" and "movingaverages" to this. (movingaverages is the average of ACTIVITY)
    import CalculateSleepIndex
    CalculateSleepIndex.full_csv_second("sleepfile.csv")

    import summary
    summary.summary(name + "." + extension)

    # uses "sleepindex.csv" to plot
    import plotter

    f.close()
    f = open("worked.txt", "w")
    f.write("True")
    f.close()

except MyError:
    pass
except Exception:
    exc_type, exc_obj, exc_tb = sys.exc_info()

    ef = open("Error_Log.txt", "w")
Code Example #35
File: model18.py Project: fagan2888/alibaba2015
	tr = StandardScaler()
	pca = PCA(n_components=10)
	lr = LinearSVC(penalty='l1',dual=False)
	pipe = Pipeline([('tr',tr),('pca',pca),('lr',lr)])
	clf = GridSearchCV(pipe, parms, scoring='f1', n_jobs=5)

	
	
	

	clf.fit(X,Y)
	
	import pickle
	f = open('%s.model' % __fname__,'wb')
	pickle.dump(clf, f)
	f.close()
	
	
	
	pred = clf.predict(X)
	
	summary.clf_summary(clf, feature_names)
	summary.summary(Y, pred)
	
	
	F1, P, R = TestModel()
	
	util.notify_me('%s.F1:%.2f,P:%.2f,R:%.2f' % (__fname__, F1*100, P*100, R*100))


Code Example #36
def publish(days=(0, )):
    file_name = "./publish.{0}.md".format(
        datetime.now().strftime(DAILY_FORMAT))
    if os.path.exists(file_name):
        print("今天已经导出了文档了~~")
    lines = []
    lines.extend(build_title())
    # Header
    datas = load_data_from_mongo(days)

    for category, articles in datas.items():
        # Write the main category heading
        line = build_markdown(category, MarkdownType.CATEGORY)
        lines.append(line)
        del line
        for article in articles:
            # Title
            if article is None:
                continue
            line = build_markdown(article.get("title", None),
                                  MarkdownType.TITLE, article.get("url", None))
            lines.append(line)
            del line
            # Paper
            code = article.get("code", "")
            if code.strip() != '':
                # Paper block level
                author = article.get("author", "")
                tags = article.get("tags", "")
                description = article.get("description", "")
                description_list = description.split('\n\n')
                if len(description_list) > 0:
                    description = description_list[0]
                line = build_markdown([author, tags, code, description],
                                      MarkdownType.PAPER)
                lines.append(line)
                del line
            # Other types, including news, novels, technology, and finance
            else:
                # Summary
                description = article.get("description", "")
                # intro (novel)
                intro = article.get("intro", "")
                if description.strip() == '':
                    if intro.strip() != "":
                        # Novel content
                        author = article.get("author", "")
                        tags = article.get("tags", "")
                        count = article.get("count", "").replace("\n", "")
                        status = article.get("status", "")
                        line = build_markdown([
                            author, tags,
                            intro.replace("\n", ""), count, status
                        ], MarkdownType.NOVEL)
                        lines.append(line)
                        del line
                    else:
                        text = article.get("content", None)
                        if text is None or text.strip() in ('', 'None'):
                            summ = article.get('title')
                        else:
                            summ = summary(article.get('title'), text)
                            if summ.strip() == "":
                                summ = article.get("title", None)
                        line = build_markdown(summ, MarkdownType.REFERENCE)
                        lines.append(line)
                        del line
                else:
                    line = build_markdown(description, MarkdownType.REFERENCE)
                    lines.append(line)
                    del line
    # Export the Markdown file
    with open(file_name, mode='w', encoding='utf-8') as f:
        for line in lines:
            f.write(line)
            f.write('\n')
        print("导出今日日报成功了~~")
Code Example #37
	pname=p[1]
	pname=re.sub('-','_',pname)
	print pname
	ff=open(pname+'_features','w')
	#query='create table '+pname+'_f(feature varchar(15),orien int)'
	#try:
		#print query
		#cur.execute(query)
		#print 'Table Created'
	#except:
		#print 'could not create feature table'
	query='select * from '+pname
	try:
		cur.execute(query)
	except:
		print 'review table error'
	
	if cur.rowcount==0:
		print 'No reviews Found'
	else:	
		rows=cur.fetchall()
		for r in rows:
			feature_ext.feature(r[1],pname,cur,ff)
	
	ff.close()

for p in phones:
	pname=p[1]
	pname=re.sub('-','_',pname)
	summary.summary(pname+'_features')					
Code Example #38
def update_stattable(statfile, probfiles, solfiles):

  # Find the directory of this script
  scriptdir = os.path.split(inspect.getfile( inspect.currentframe() ))[0]
  rootdir = os.path.join(scriptdir,'..','..')

  # Read instance status data
  csvfile = open(statfile, 'rt')
  csvdialect = csv.Sniffer().sniff(csvfile.read(), ';\t')
  csvfile.seek(0)
  csvreader = csv.reader(csvfile, csvdialect, quotechar='"')
  header = next(csvreader)
  statdictnames = ['pack','name','psense','pcer','perr','pobj','dcer','derr','dobj','claim']
  statdict = dict((rows[1], dict((x,y) for x,y in zip(statdictnames, rows))) for rows in csvreader)
  csvfile.close()

  # Update instance status data
  for (probfile, solfile) in zip(probfiles, solfiles):
    sys.stdout.write('\n' + solfile + '\n')
    solstat = summary(probfile, solfile, lambda x: None)
    csvstat = statdict[solstat['prob'].name]
    isminimize = (solstat['prob'].obj.strip().upper() == 'MIN')

    cchanged = False
    pchanged = False
    dchanged = False

    if 'claim' in solstat:
      if not csvstat['claim'].strip() and solstat['claim'].strip():
        cchanged = True
        csvstat['claim'] = solstat['claim']
      elif csvstat['claim'].strip() and solstat['claim'].strip() and csvstat['claim'].strip().upper() != solstat['claim']:
        raise Exception(solstat['prob'].name + ': The claim ' + solstat['claim'] + ' is incompatible with the existing claim ' + csvstat['claim'])

    if 'psol' in solstat:
      psol = solstat['psol']
      perr = max(psol[1].values())
      if worty_replacement(csvstat['perr'], csvstat['pobj'], perr, psol[0], isminimize):
        pchanged = True
        csvstat['pcer'] = 'FEASIBILITY'
        csvstat['perr'] = '{0:.16g}'.format(perr)
        csvstat['pobj'] = '{0:.16g}'.format(psol[0])

    if 'pinfeascer' in solstat:
      pinfeascer = solstat['pinfeascer']
      perr = max(pinfeascer[1].values())
      if worty_replacement(csvstat['perr'], csvstat['pobj'], perr, pinfeascer[0], isminimize):
        pchanged = True        
        csvstat['pcer'] = 'INFEASIBILITY'
        csvstat['perr'] = '{0:.16g}'.format(perr)
        csvstat['pobj'] = '{0:.16g}'.format(pinfeascer[0])

    if solstat['prob'].intvarnum == 0:
      if 'dsol' in solstat:
        dsol = solstat['dsol']
        derr = max(dsol[1].values())
        if worty_replacement(csvstat['derr'], csvstat['dobj'], derr, dsol[0], not isminimize):
          dchanged = True
          csvstat['dcer'] = 'FEASIBILITY'
          csvstat['derr'] = '{0:.16g}'.format(derr)
          csvstat['dobj'] = '{0:.16g}'.format(dsol[0])

      if 'dinfeascer' in solstat:
        dinfeascer = solstat['dinfeascer']
        derr = max(dinfeascer[1].values())
        if worty_replacement(csvstat['derr'], csvstat['dobj'], derr, dinfeascer[0], not isminimize):
          dchanged = True
          csvstat['dcer'] = 'INFEASIBILITY'
          csvstat['derr'] = '{0:.16g}'.format(derr)
          csvstat['dobj'] = '{0:.16g}'.format(dinfeascer[0])

    if cchanged or pchanged or dchanged:
      sys.stdout.write('PENDING CHANGES: ')
      if cchanged:
        sys.stdout.write('CLAIM ')
      if pchanged:
        sys.stdout.write('PRIMAL ')
      if dchanged:
        sys.stdout.write('DUAL ')

  # Define instance sorting
  convert = lambda text: int(text) if text.isdigit() else text
  alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
  sortedstatdict = sorted(statdict, key=alphanum_key)

  # Write instance status data
  csvfile = open(statfile, 'wt')
  csvwriter = csv.writer(csvfile, csvdialect, quotechar='"')
  csvwriter.writerow(header)
  for k in sortedstatdict:
    csvwriter.writerow([statdict[k][x] for x in statdictnames])
  csvfile.close()
Code Example #39
from torch.autograd import Variable
from summary import summary
from model import PartialUNet
import numpy as np
import torch

if __name__ == '__main__':
    image = Variable(
        torch.from_numpy(np.random.random([1, 3, 480, 512])).float()).cuda()
    mask = Variable(torch.from_numpy(np.random.randint(
        0, 2, [1, 3, 480, 512]))).cuda()
    model = PartialUNet().cuda()
    summary(model, input_size=[(3, 480, 512), (3, 480, 512)])
    model(image, mask)
Code Example #40
                person = person + 1
                detect_mature = 0

            # To save memory we make sure that the gender & age detection model works on just one frame

            detect_mature += 1

            # Centroid used for retaining the ID of a detected face
            # Drawing the bounding box of the face along with the associated ID
            cv2.putText(frame, text_ID, (centroid[0] - 10, centroid[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
            cv2.rectangle(frame, (centroid[0], centroid[1]),
                          (centroid[2], centroid[3]), (0, 0, 255), 2)
            if (date != str(datetime.date.today().strftime("%B %d, %Y"))):
                summary(T_age, T_gender, Total, detected, date)
                print("[Info]: Summary file saved . . .")
                date = str(datetime.date.today().strftime("%B %d, %Y"))
                T_age = [0, 0, 0, 0, 0, 0, 0, 0]
                T_gender = [0, 0]
                Total = 0

# For streaming the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # If the `q` key was pressed the stream will end
    if key == ord("q"):
        break

# For cleaning up the space