Пример #1
0
def textChanger(pdfText, mostAuthor="", mostPaper="", extractOptions=None, devMode=False):
    """Take the semi-cleaned text of a PDF and extract the desired portions.

    Output is markdown suitable for display on the website.

    Args:
        pdfText: semi-cleaned text of a PDF document.
        mostAuthor: truthy to compute the most-cited author (see NOTE below).
        mostPaper: truthy to compute the most-cited paper (see NOTE below).
        extractOptions: options forwarded to extract(); defaults to
            ["nltk", 5, 5, 5].
        devMode: unused in this function; kept for interface compatibility.

    Returns:
        The extraction produced by extract().
    """
    # Fix the mutable-default-argument pitfall: a shared default list could be
    # mutated downstream and leak state across calls. (Also fixes the original
    # quadruple-quote docstring typo: `""""...`.)
    if extractOptions is None:
        extractOptions = ["nltk", 5, 5, 5]
    pdfText = pre_clean.pre_clean(pdfText)
    # NOTE(review): these results are bound to locals but never returned or
    # used below -- presumably evaluator() has side effects; confirm.
    if mostAuthor:
        mostAuthor = evaluator(authorCounter(pdfText))
    if mostPaper:
        mostPaper = evaluator(paperCounter(pdfText))
    return extract(pdfText, extractOptions)
Пример #2
0
 def test(self,sess):
     """Run one inference pass on the test image pair, report timing, and save
     and score the predicted classification map.

     Args:
         sess: an open TensorFlow session holding the trained graph.
     """
     start_time = datetime.datetime.now()
     # Load the real/imaginary image channels and the ground-truth label map.
     img_r,img_i,lab = image_test(self.raw_path,self.label_path)
     # Pad each channel to shape+6 (presumably a 3-px border on every side --
     # TODO confirm image_add_border semantics).
     img_r_raw = image_add_border(img_r[0],[img_r[0].shape[0]+6,img_r[0].shape[1]+6])
     img_i_raw = image_add_border(img_i[0],[img_i[0].shape[0]+6,img_i[0].shape[1]+6])
     # Dropout disabled (keep_prob = 1.0) and decay_steps pinned for evaluation.
     feed_dict={self.input_real:[img_r_raw],self.input_imag:[img_i_raw],self.input_raw_r:img_r,self.input_raw_i:img_i,self.label:lab,self.decay_steps:1,self.keep_prob:1.,self.keep_prob2:1.}

     pred_label = sess.run(self.out, feed_dict=feed_dict)
     print("image_size: "+str(pred_label.shape[1:]))
     # [5:] strips the "0:00:" prefix from the timedelta string, leaving seconds.
     print("classification time: "+(str(datetime.datetime.now()-start_time)[5:])+' (s)')
     print('classification image saving...')
     # Save the first (only) batch element as an image, then score it.
     pred_file = image_save(pred_label[0,...],pred_label.shape[1],pred_label.shape[2],"CRPM_Net",self.label_path,True)
     evaluator(pred_file,self.label_path)
Пример #3
0
def textChanger(pdfText,
                mostAuthor="",
                mostPaper="",
                extractOptions=None,
                devMode=False):
    """Take the semi-cleaned text of a PDF and extract the desired portions.

    Output is markdown suitable for display on the website.

    Args:
        pdfText: semi-cleaned text of a PDF document.
        mostAuthor: truthy to compute the most-cited author (see NOTE below).
        mostPaper: truthy to compute the most-cited paper (see NOTE below).
        extractOptions: options forwarded to extract(); defaults to
            ["nltk", 5, 5, 5].
        devMode: unused in this function; kept for interface compatibility.

    Returns:
        The extraction produced by extract().
    """
    # Fix the mutable-default-argument pitfall: a shared default list could be
    # mutated downstream and leak state across calls. (Also fixes the original
    # quadruple-quote docstring typo: `""""...`.)
    if extractOptions is None:
        extractOptions = ["nltk", 5, 5, 5]
    pdfText = pre_clean.pre_clean(pdfText)
    # NOTE(review): these results are bound to locals but never returned or
    # used below -- presumably evaluator() has side effects; confirm.
    if mostAuthor:
        mostAuthor = evaluator(authorCounter(pdfText))
    if mostPaper:
        mostPaper = evaluator(paperCounter(pdfText))
    return extract(pdfText, extractOptions)
Пример #4
0
    def __init__(self, work_path, tool_path, train_fname, test_fname, gt_fname,
                 rl_path, rl_names, sel_uni_fname):
        """Store paths for a learning-to-rank experiment, load the selected
        universities, and construct the evaluator.

        (Python 2 module: note the print statement below.)
        NOTE(review): os.chdir mutates process-global state -- confirm callers
        expect the working directory to change.
        """
        # path to folder stores trained models and generated results
        self.work_path = work_path
        # path to the executable file of RankSVM or RankNet
        self.tool_path = tool_path
        # filename of training data (different format for different tool)
        self.train_fname = train_fname
        # filename of testing data
        self.test_fname = test_fname
        # filename of ground truth
        self.gt_fname = gt_fname
        # historical ranking lists from traditional methods (for Evaluator)
        self.rl_path = rl_path
        self.rl_names = rl_names
        os.chdir(self.work_path)

        # read selected universities; a 1-D array means one name per row,
        # otherwise the first column of each row holds the university name
        self.sel_unis = []
        data = np.genfromtxt(sel_uni_fname, dtype=str, delimiter=',')
        if len(data.shape) == 1:
            self.sel_unis = copy.copy(data)
        else:
            for uni_kv in data:
                self.sel_unis.append(uni_kv[0])
        print '#universities selected:', len(self.sel_unis)

        # initialize evaluator
        self.evaluator = evaluator.evaluator(self.rl_path, self.rl_names,
                                             self.gt_fname)
Пример #5
0
 def process(self, connection, source, target, args):
     """IRC handler: define a new bot command from a user-supplied lambda.

     args[0] is the new command's name; the remaining args joined together are
     the lambda source. Returns None when the input is unusable.

     (Python 2 syntax: `except Exception, e`.)
     NOTE(review): this example is truncated -- the final return dict at the
     end is cut off mid-literal.
     """
     if len(args) < 2:
         return None

     f_name = args[0]
     expr = u' '.join(args[1:])
     # Only accept lambda expressions; anything else is silently ignored.
     if 'lambda' not in expr:
         return None
     try:
         lmbda = evaluator(expr)
     except SyntaxError:
         return {'action': self.Action.PRIVMSG,
                 'target': target,
                 'message': (u'Syntax Error',)
                 }

     # The lambda must take exactly one argument (the command's arg string).
     if len(inspect.getargspec(lmbda)[0]) != 1:
         return None

     # Build a plugin class on the fly; lmbda is captured in the closure.
     class DefinedPlugin(bot.CommandPlugin):
         name = f_name
         def process(self, c, s, t, a):
             try:
                 result = lmbda(u' '.join(a))
             except Exception, e:
                 # Report any runtime error back as "ExceptionName: message".
                 ename = e.__class__.__name__
                 msg = '%s: %s' % (ename, e)
                 return {'action': self.Action.PRIVMSG,
                         'target': target,
                         'message': (msg,)
                         }
             else:
                 return {'action': self.Action.PRIVMSG,
Пример #6
0
 def process(self, connection, source, target, args):
     """Evaluate the joined args as an expression and reply with the result.

     Replies longer than 100 characters are truncated with a '...' suffix.
     (Python 2 syntax: `unicode`, `except Exception, e`.)
     """
     expr = u' '.join(args)

     try:
         result = unicode(evaluator(expr))
         msg = result[:100]
         # Flag truncation so the user knows output was cut.
         if result != msg:
             msg += '...'

         return {'action': self.Action.PRIVMSG,
                 'target': target,
                 'message': (msg,)
                 }
     except SyntaxError:
         return {'action': self.Action.PRIVMSG,
                 'target': target,
                 'message': (u'Syntax Error',)
                 }
     except Exception, e:
         # Report any runtime error back as "ExceptionName: message".
         ename = e.__class__.__name__
         msg = '%s: %s' % (ename, e)
         return {'action': self.Action.PRIVMSG,
                 'target': target,
                 'message': (msg,)
                 }
Пример #7
0
	def do(self):
		"""Cluster every document by BFS over connected components, write the
		clusters to the result file, then score them against the answer file.

		(Python 2: xrange, iterating an open file.)
		"""
		# BFS from every unvisited node; self.marks tracks visited state and
		# self.setnum counts the discovered components.
		i = 0
		while (i < self.lencora):
			if (self.marks[i] == 0):
				self._bfs(i)
				self.setnum += 1
			i += 1
		s = 0
		res = []
		# Write each cluster as one space-separated line of member ids.
		fp = open(self.resultfile, 'w+')
		for i in xrange(len(self.clusters)):
			s += len(self.clusters[i])
			tmp = list(self.clusters[i])
			res.append(tmp)
			for j in xrange(len(tmp)):
				if (j == len(tmp) - 1):
					fp.write(str(tmp[j]) + '\n')
				else:
					fp.write(str(tmp[j]) + ' ')
		fp.close()
		# Read the gold clusters: one cluster of integer ids per line.
		fp = open(self.answerfile)
		lines = [line.strip().split() for line in fp]
		ans = []
		for line in lines:
			tmp = []
			for j in xrange(len(line)):
				tmp.append(int(line[j]))
			ans.append(tmp)
		# Score predicted clusters against the gold clustering.
		ev = evaluator(self.lencora)
		ev.load_answer_clusters(ans)
		ev.evaluate_clusters(res)
Пример #8
0
 def initForDriving(self, *args, **kwargs):
     """Reset per-run driving state and optionally attach an evaluator.

     Keyword args:
         use_evaluator: attach an evaluator for supervised agents (default True).
         show_plots: forwarded to the evaluator (default True).
     """
     self.numsteps = 0
     self.last_action = None
     self.repeated_action_for = self.action_repeat
     # dict.get replaces the verbose `x if k in kwargs else default` pattern.
     self.use_evaluator = kwargs.get("use_evaluator", True)
     self.show_plots = kwargs.get("show_plots", True)
     # Only supervised agents set isSupervised explicitly; they use the SMALL
     # evaluator here, otherwise the one from AbstractRLAgent is used.
     # (comment translated from German)
     if self.isSupervised and self.use_evaluator:
         self.evaluator = evaluator(self.containers, self, self.show_plots, self.conf.save_xml, ["progress", "laptime"], [100, 60])
Пример #9
0
def run_code(usr_grp, mov_grp, alg, k):
    """Run the recommendation pipeline and time each stage.

    Args:
        usr_grp: number of user clusters for reducing the rating matrix.
        mov_grp: number of movie clusters (applied to the transposed matrix).
        alg: "both" to compare collaborative filtering against SVD, otherwise
            the algorithm name forwarded directly to ev.evaluator.
        k: rank parameter for the SVD evaluation.

    Returns:
        (col_err, col_time, svd_err, svd_time) when alg == "both", else None.
        NOTE(review): the else branch computes a timestamp but returns
        nothing -- it looks truncated in the original; confirm upstream.
    """
    t0 = time.time()
    # Regenerate the cached data file if it does not exist yet.
    filename = "data.csv"
    data_file = Path(filename)
    if not data_file.is_file():
        rearrange_data(filename)
    t1 = time.time()
    #print("Processing raw data took: " + str(t1 - t0))

    # Load the ratings matrix; cluster-reduce users, then movies (via the
    # transpose), dropping the leading id column.
    df = pd.read_csv(filename)
    df_red = pp.reduce_cluster(df, usr_grp)
    df_red = df_red.iloc[:, 1:]
    df_red.columns = df_red.columns.astype(int)
    df_red = pp.reduce_cluster(df_red.transpose(), mov_grp)
    df_red = df_red.transpose()
    t2 = time.time()
    #print("Loading and preprocess data took: " + str(t2 - t1))

    # Splitting data to training and test sets (60/40).
    train, test = ev.split_data(df_red, 0.6)
    t3 = time.time()
    #print("Splitting data took: " + str(t3 - t2))

    # Removing some values from the test set to predict later.
    newtest, ans_col, ans_rate = ev.corruptor(test, 1)
    t4 = time.time()
    #print("Processing test set took: " + str(t4 - t3))

    # Evaluate performance of both svd and collaborative filtering methods.
    if alg == "both":
        col_err = ev.evaluator(train, newtest, ans_col, ans_rate, "collab",
                               round(usr_grp * 0.625))
        t5 = time.time()
        print("Evaluating with collab took: " + str(t5 - t4))
        col_time = t5 - t4
        svd_err = ev.evaluator(train, newtest, ans_col, ans_rate, "svd", k)
        # BUGFIX: svd_time was measured as t5 - t4, which also included the
        # collab evaluation above; measure from the end of the collab stage.
        t6 = time.time()
        svd_time = t6 - t5
        print("Evaluating with svd took: " + str(svd_time))
        return col_err, col_time, svd_err, svd_time
    else:
        ev.evaluator(train, newtest, ans_col, ans_rate, alg, k)
        t5 = time.time()
Пример #10
0
 def __init__(self, playerNumber = 1, rows = 6, cols = 7, maxDepth = 4):
     """Build the search-tree root for the given player on a rows x cols board.

     Player 1 searches as the maximizer, player 2 as the minimizer.
     Raises Exception for any other player number.
     """
     self.evaluator = ev.evaluator()
     if playerNumber not in (1, 2):
         raise Exception('playerNumber must be 1 or 2')
     # Player 1 is the maximizing side at the root.
     self.root = Node(GameState(rows=rows, cols=cols), playerNumber == 1, self.evaluator)
     self.maxDepth = maxDepth
Пример #11
0
 def __init__(self, state, isMaximizer, evaluator=None):
     """Wrap a game state as a search-tree node and score it immediately.

     Args:
         state: game state object (its .data grid is inspected for fullness).
         isMaximizer: True when this node belongs to the maximizing player.
         evaluator: optional shared evaluator; a fresh one is built if None.
     """
     self.state = state
     # Fall back to a fresh evaluator when the caller supplies none.
     self.evaluator = evaluator if evaluator is not None else ev.evaluator()
     self.updateEvalScore()
     self.isMaximizer = isMaximizer
     # Terminal when the score is +/-inf (decided game) or the board is full.
     grid = self.state.data
     decided = (self.evalScore == numpy.inf) or (self.evalScore == -numpy.inf)
     board_full = numpy.count_nonzero(grid) == numpy.size(grid)
     self.isTerminal = decided or board_full
     self.children = []
     self.orderToSearch = []
Пример #12
0
 def create_plugin(self, f_name, expr, target=None):
     """Compile *expr* (lambda source text) into a new CommandPlugin subclass.

     Args:
         f_name: name for the generated command plugin.
         expr: source of a one-argument lambda, compiled via evaluator().
         target: optional IRC target for error feedback; when None, failures
             return False silently.

     Returns:
         The generated plugin class, False, or the privmsg() result on error.
     """
     try:
         lmbda = evaluator(expr)
     except SyntaxError:
         if target is None:
             return False
         return self.privmsg(target, 'Syntax Error')
     if target is not None:
         # getfullargspec replaces getargspec (removed in Python 3.11);
         # index 0 is still the list of positional argument names.
         if len(inspect.getfullargspec(lmbda)[0]) != 1:
             return self.privmsg(target, 'Lambda must have one argument')
     # Build the plugin class on the fly; lmbda is captured in the closure.
     class DefinedPlugin(bot.CommandPlugin):
         name = f_name
         def process(self, c, s, t, a):
             try:
                 result = lmbda(u' '.join(a))
             except Exception as e:
                 # Report any runtime error back as "ExceptionName: message".
                 ename = e.__class__.__name__
                 msg = '%s: %s' % (ename, e)
                 return self.privmsg(t, msg)
             else:
                 return self.privmsg(t, result)
     return DefinedPlugin
Пример #13
0
 def initForDriving(self, *args, **kwargs):
     """Prepare the agent for an actual (non-pretrain) driving run.

     Sets up replay memory (unless the agent already brings its own), resets
     per-run counters and epsilon, and optionally attaches an evaluator.

     Raises:
         AssertionError: when loaded in pretrain mode, when containers are
             missing, or when no pre-trained model directory exists.
     """
     assert not self.isPretrain, "You need to load the agent as Not-pretrain for a run!"
     assert self.containers is not None, "if you init the net for a run, the containers must not be None!"
     if not self.start_fresh:
         # BUGFIX: the original wrote os.path.exists(a or b), which only ever
         # tested the first truthy path string; the intent is "either
         # checkpoint directory exists".
         assert os.path.exists(self.folder(self.conf.pretrain_checkpoint_dir)) \
             or os.path.exists(self.folder(self.conf.checkpoint_dir)), "I need any kind of pre-trained model"

     # Some agents already have a different memory implementation; don't
     # overwrite it. (comment translated from German)
     if not hasattr(self, "memory"):
         self.memory = Memory(self.conf.memorysize, self.conf, self, load=(not self.nomemoryload))
     super().initForDriving(*args, **kwargs)
     self.keep_memory = kwargs.get("keep_memory", self.conf.keep_memory)
     self.freezeInfReasons = []
     self.freezeLearnReasons = []
     self.numInferencesAfterLearn = 0
     self.numLearnAfterInference = 0
     self.stepsAfterStart = -1  # for the head start
     self.epsilon = self.startepsilon
     self.episode_statevals = []  # for the evaluator
     self.episodes = 0  # for the evaluator; reset to zero on every restart, which is fine for that purpose
     if self.use_evaluator:
         self.evaluator = evaluator(self.containers, self, self.show_plots, self.conf.save_xml,      \
                                    ["average rewards", "average Q-vals", "progress", "laptime"               ], \
                                    [(-0.1,1.3),        (-1,100),          100,        self.time_ends_episode ] )
Пример #14
0
 def train(self, epochs, batch_size=10):
     """Adversarially train the super-resolution generator against D.

     Args:
         epochs: number of passes over the data loader.
         batch_size: images per training batch.

     Every 25th batch the generator is checkpointed to 'pretrained models/'
     and evaluated (perceptual samples + mean SSIM over 250 images).
     """

     # Create adversarial ground truths: one label per discriminator output
     # pixel (PatchGAN-style output -- TODO confirm D's output shape).
     out_shape = (batch_size,) + (self.target_shape[0], self.target_shape[1], 1)
     valid_D = np.ones(out_shape)
     fake_D = np.zeros(out_shape)

     # define an evaluator object to monitor the progress of the training
     dynamic_evaluator = evaluator(img_res=self.img_shape, SRscale = self.SRscale)
     for epoch in range(epochs):
         for batch, (_, img_y, img_Y) in enumerate(self.data_loader.load_batch(batch_size)):
             # translate domain y to domain Y
             fake_img_Y = self.SR.predict(img_y)

             # Train the discriminator on real and generated samples, then
             # average the two losses.
             D_loss_real = self.D.train_on_batch(img_Y, valid_D)
             D_loss_fake = self.D.train_on_batch(fake_img_Y, fake_D)
             D_loss = 0.5 * np.add(D_loss_real, D_loss_fake)

             # Train the generator: adversarial + reconstruction objectives.
             G_loss = self.Generator.train_on_batch([img_y], [valid_D, img_Y])

             print("[Epoch %d/%d] [Batch %d/%d]--[D: %.3f] -- [G_adv: %.3f] [G_rec: %.3f]" % (epoch, epochs,
                   batch, self.data_loader.n_batches, D_loss, G_loss[1], G_loss[2]))

             if batch % 25 == 0 and batch!=0:
                 """save the model"""
                 model_name="{}_{}.h5".format(epoch, batch)
                 self.SR.save("pretrained models/"+model_name)
                 print("Epoch: {} --- Batch: {} ---- saved".format(epoch, batch))

                 # Point the evaluator at the current generator and run the
                 # perceptual spot-check plus the objective SSIM sweep.
                 dynamic_evaluator.model = self.SR
                 dynamic_evaluator.epoch = epoch
                 dynamic_evaluator.batch = batch
                 dynamic_evaluator.perceptual_test(5)

                 sample_mean_ssim = dynamic_evaluator.objective_test(batch_size=250)
                 print("Sample mean SSIM: -------------------  %05f   -------------------" % (sample_mean_ssim))
Пример #15
0
    def __init__(self, work_path, tool_path, train_fname, test_fname, gt_fname,
                 rl_path, rl_names, sel_uni_fname):
        """Store paths for a learning-to-rank experiment, load the selected
        universities, and construct the evaluator.

        (Python 2 module: note the print statement below.)
        NOTE(review): os.chdir mutates process-global state -- confirm callers
        expect the working directory to change.
        """
        self.work_path = work_path          # trained models / results folder
        self.tool_path = tool_path          # RankSVM / RankNet executable
        self.train_fname = train_fname      # training data filename
        self.test_fname = test_fname        # testing data filename
        self.gt_fname = gt_fname            # ground-truth filename
        self.rl_path = rl_path              # historical ranking lists (for the evaluator)
        self.rl_names = rl_names
        os.chdir(self.work_path)

        # read selected universities; a 1-D array means one name per row,
        # otherwise the first column of each row holds the university name
        self.sel_unis = []
        data = np.genfromtxt(sel_uni_fname, dtype=str, delimiter=',')
        if len(data.shape) == 1:
            self.sel_unis = copy.copy(data)
        else:
            for uni_kv in data:
                self.sel_unis.append(uni_kv[0])
        print '#universities selected:', len(self.sel_unis)

        # initialize evaluator
        self.evaluator = evaluator.evaluator(self.rl_path, self.rl_names,
                                             self.gt_fname)
Пример #16
0
#!/usr/bin/env python3
import sys
import parser, evaluator

# Entry point: parse the program file named on the command line, then run it.
fn = sys.argv[1]
# Use a context manager so the file handle is closed deterministically (the
# original leaked the handle returned by open()).
# NOTE(review): assumes parser.parse consumes the stream eagerly -- confirm.
with open(fn, "rb") as src:
    program = parser.parse(src)
evaluator.evaluator(program)
Пример #17
0
import serial
import time
import sys
from evaluator import evaluator

myeval = evaluator()

# Open the serial link to the radio device (opening resets the device).
s = serial.Serial('/dev/ttyACM1', 115200,
                  timeout=1)  # opens a serial port (resets the device!)
time.sleep(2)  # give the device some time to startup (2 seconds)

# write to the device’s serial port
s.write(b"a[AB]\n")  # set the device address to AB
time.sleep(0.1)  # wait for settings to be applied
# print(s.readline())

s.write(b"c[1,0,5]\n")  # set number of retransmissions to 5
time.sleep(0.1)  # wait for settings to be applied
# print(s.readline().decode('unicode_escape').strip())

s.write(b"c[0,1,30]\n"
        )  # set FEC threshold to 30 (apply FEC to packets with payload >= 30)
time.sleep(0.1)  # wait for settings to be applied

# Send a single-NUL-byte message to address CD.
# NOTE(review): `payload` and `myeval`/`sys` are never used below -- likely
# leftovers from a larger script; confirm before removing.
message = "\0"
payload = 1
s.write(("m[" + message + ",CD]\n").encode("ascii"))
"""
while True:
    print(s.readline().decode('unicode_escape').strip())
"""
Пример #18
0
import datagate
import learner
import evaluator
import sys

# Train a linear-regression game evaluator on past games, then let the user
# probe it interactively with hand-typed feature vectors.
NumFeat = 46                            # features per game vector
NumberOfGames = int(sys.argv[1])        # how many past games to fetch
pastdata = datagate.getdata(NumberOfGames)

# This part uses learner.py, which applies linear regression and returns a
# vector of coefficients.
print("Begin learning...")
coe = learner.learner(pastdata, NumFeat)
print(coe)

# This part tests whether the learner works reasonably by allowing manual
# input: type NumFeat space-separated integers, or -1 to quit.
while (True):
	inp = input().split(" ")
	for i in range(0, len(inp)): inp[i] = int(inp[i])
	if (inp[0] == -1): break
	if (len(inp) != NumFeat):
		print("Not a legal vector\n")
	else:
		print(evaluator.evaluator(coe, inp, NumFeat))

Пример #19
0
 # Command dispatcher for the interactive shell. Fragment: the enclosing
 # loop and the parsing of `input_var` / `args` happen outside this snippet.
 if(input_var[0] == 'exit'):
     break
 elif(input_var[0] == 'help'):
     # `help` alone lists commands; `help <command>` shows its arguments.
     if len(input_var) == 1:
         print("Commands: analyze, generate, evaluate, subset, exit")
         print("Type help _command_ for more information")
     else:
         if(input_var[1] == 'analyze'):
             print("Mandatory args:input, output")
         if(input_var[1] == 'generate'):
             print("Mandatory args: input, output")
             print("Optional args: size, randomUsers, randomOrders")
         if(input_var[1] == 'evaluate'):
             print("Mandatory args: file1, file2")
         if(input_var[1] == 'subset'):
             print("Mandatory args:input, output, size")
 elif(input_var[0] == 'analyze'):
     analyzer.analyze('../Data/'+args['input'],'../Data/'+args['output'])
 elif(input_var[0] == 'generate'):
     # Optional flags are simple presence checks; size defaults to -1.
     randomUsers = 'randomUsers' in args
     randomOrders = 'randomOrders' in args
     generator.generate('../Data/'+args['input'],'../Data/'+args['output'], int(args.get('size', -1)), randomUsers, randomOrders)
 elif(input_var[0] == 'evaluate'):
     e = evaluator.evaluator('../Data/'+args['file1'],'../Data/'+args['file2'])
 elif(input_var[0] == 'subset'):
     # Write a random subset of `size` orders from the input file.
     df = dataReader.readOrders('../Data/'+args['input'])
     subset_df = dataWriter.makeOrderSubset(df, int(args['size']))
     dataWriter.writeOrderFileRandom(subset_df, '../Data/'+args['output'])
 else:
     print("Commands: analyze, generate, evaluate, subset, exit")
     print("Type help _command_ for more information")
Пример #20
0
__author__ = "rogersjeffrey"
"""
  use the heuristics to identify if the given sentence has a possible interaction or not
  and print the stats
"""
# (Python 2 module: cPickle import below.)
import identify_interaction as id
import evaluator as evalu
import pprint
import utils
import cPickle as pickle

# NOTE(review): "indentify" matches the class name in identify_interaction --
# a project-wide typo; do not "fix" it here without renaming the class.
id_instance = id.indentify_interaction()
eval_instance = evalu.evaluator()
# Pre-trained n-gram difference models consumed by the identifier.
files_list = [
    "models/bigram_diff.p",
    "models/bigram_diff_total.p",
    "models/trigram_diff.p",
    "models/trigram_diff_total.p",
]
id_instance.load_model(files_list)
# paths=["/Users/rogersjeffrey/Downloads/DDICorpus/DDICorpus/Train/DrugBank","/Users/rogersjeffrey/Downloads/DDICorpus/DDICorpus/Train/Medline"]
paths = ["./Test/Test for DDI Extraction task/DrugBank", "./Test/Test for DDI Extraction task/Medline"]
files = utils.get_files_list(paths)

# Run the identifier over every file and count sentences.
# NOTE(review): this example is truncated -- the loop body ends mid-logic.
total_sentence_count = 0
for file in files:
    result = id_instance.identify_interaction(file)
    for sentence_id in result:
        total_sentence_count += 1
        prediction = result[sentence_id]
Пример #21
0
	def _abNegamax(self, board, maxDepth, depth, alpha, beta):
		"""Negamax search with alpha-beta pruning and a transposition table.

		Args:
			board: chess board object (mutated via push/pop during search --
				presumably python-chess; confirm).
			maxDepth: total search depth in plies.
			depth: current ply (0 at the root).
			alpha, beta: current alpha-beta window.

		Returns:
			(bestMove, score, finalBoard) from the side-to-move's perspective;
			bestMove is '' at evaluated leaves.
		"""
		alphaOriginal = alpha

		# Transposition-table probe: a stored entry searched at least as deep
		# as we need can cut off or narrow the window.
		zhash = zobrist_hash(board)
		entry = self._transTable.table.get(zhash)
		if entry and entry.depth >= maxDepth - depth:
			if entry.scoreType == self._transTable.EXACT_SCORE:
				self._transTable.hits = self._transTable.hits + 1
				return (entry.move, entry.score, entry.finalBoard)
			elif entry.scoreType == self._transTable.LOWER_BOUND_SCORE:
				alpha = max(alpha, entry.score)
			else:
				beta = min(beta, entry.score)
			if alpha >= beta:
				return (entry.move, entry.score, entry.finalBoard)

		# No usable entry: create one (caching the game result string).
		newEntry = False
		if not entry:
			entry = transTable.transTableEntry()
			entry.zobristHash = zhash
			newEntry = True
			entry.result = board.result()
		entry.depth = maxDepth - depth
		entry.move = None

		#result = board.result()
		# Leaf: depth limit reached or the game is decided ("*" = ongoing).
		if (depth == maxDepth or entry.result != "*"):
			entry.score = evaluator(board, entry.result)
			entry.finalBoard = board
			# Evict an arbitrary entry when the table is full before storing.
			if (self._transTable.size == self._transTable.maxSize and newEntry):
				self._transTable.table.popitem()
				self._transTable.size = self._transTable.size - 1
			self._transTable.table[entry.zobristHash] = entry
			self._transTable.size = self._transTable.size + 1
			return ('', entry.score, board)

		maxScore = -(1<<64)
		score = maxScore
		bestMove = None
		finalBoard = None

		# Negamax recursion: negate the child's score and swap/negate the window.
		for move in board.legal_moves:
			board.push(move)
			_, score, finalBoard = self._abNegamax(board, maxDepth, depth + 1, -beta, -alpha)
			score = -score
			board.pop()

			if score > maxScore:
				maxScore = score
				bestMove = move

			alpha = max(alpha, score)
			if alpha >= beta:
				break

		# Classify and store the result: fail-low -> upper bound, fail-high ->
		# lower bound, otherwise exact.
		entry.score = maxScore
		entry.move = bestMove
		entry.finalBoard = finalBoard
		if maxScore <= alphaOriginal:
			entry.scoreType = self._transTable.UPPER_BOUND_SCORE
		elif maxScore >= beta:
			entry.scoreType = self._transTable.LOWER_BOUND_SCORE
		else:
			entry.scoreType = self._transTable.EXACT_SCORE
		if (self._transTable.size == self._transTable.maxSize and newEntry):
			self._transTable.table.popitem()
			self._transTable.size = self._transTable.size - 1
		self._transTable.table[entry.zobristHash] = entry
		self._transTable.size = self._transTable.size + 1
		return (bestMove, maxScore, finalBoard)
Пример #22
0
            random.shuffle(data)
            delta = [0 for ii in xrange(self.feature_num)]
            for j in xrange(len(data)):
                for k in xrange(self.feature_num):
                    z = self.sigmoid(
                        sum(map(lambda x, y: x * y, self.w, data[j])))
                    delta[k] += (z - self.train_label[j]) * data[j][k]
                if j % self.batch_size == 0 and j != 0:
                    for k in xrange(self.feature_num):
                        self.w[k] -= delta[k] * self.learning_rate
                    delta = [0 for ii in xrange(self.feature_num)]
            if i % 100 == 0:
                print 'Round %d, loss:%.5f' % (i, self.get_loss())

    def get_pred(self):
        """Return sigmoid(w . x) predictions for every test example.

        Uses the learned weights self.w over self.test_data, where each row
        has self.feature_num features. (Python 2: xrange.)
        """
        pred = []
        for i in xrange(len(self.test_data)):
            temp = 0
            # Manual dot product of the weights and the i-th feature row.
            for j in xrange(self.feature_num):
                temp += self.w[j] * self.test_data[i][j]
            pred.append(self.sigmoid(temp))
        return pred


if __name__ == '__main__':
    # Train logistic regression on the 'car' dataset (0.2 presumably the test
    # split fraction -- confirm against logistic_regressor), then plot the ROC
    # curve of its test-set predictions.
    ml = logistic_regressor('car', 0.2)
    ml.train_gd()
    pred = ml.get_pred()
    lr_eval = evaluator(ml.test_label, pred, 'logistic regression')
    lr_eval.plot_roc()
Пример #23
0
from interpretor import interpretor
from configurer import configurer
from evaluator import evaluator
from admin import admin

# Wire up the fixture: the interpreter talks to a local MongoDB; the
# configurer and evaluator share it, and the admin drives both.
itp=interpretor('localhost',27017)
config=configurer(itp)
eva=evaluator(itp)
ad=admin(eva,config)




"""This script performs a complete test of our Python+MongoDB OrBAC single Tenant implementation"""

"""Test Scenario
1. we start from zero having a tenant called "apple"
2. we insert all administrative views including: srole,activity,view,role_assignment,activity_assignment,licence,cross_licence
3. we initialize it with assigning "John" to subject, "admin" to role, insert delete to action, insertActivity, deleteActivity and manage to activity and the first licence " John is permitted to manage licence in apple, also "nominal" to context
4. we then use John to create licences for himself for all administrative views, then use John to create different users,actions, resources and assign them to different abstract roles,activities,views
5. use John to assign users privileges
6. use John to assign admin privileges to someone
"""
"""1. we start from zero having a tenant called apple"""
#create tenant ('null' presumably acts as the bootstrap requester -- confirm)
config.CreateTenant('null','apple')
"""2. we insert all administrative views including: subject,action,object,role,activity,view,role_assignment,activity_assignment,licence
"""
#create administrative views
config.AssignView('null','apple',{'_id':'context','attr':{}})
config.AssignView('null','apple',{'_id':'role','attr':{}})
Пример #24
0
    def _abNegamax(self, board, maxDepth, depth, alpha, beta):
        """Negamax search with alpha-beta pruning and a transposition table.

        Args:
            board: chess board object (mutated via push/pop during search --
                presumably python-chess; confirm).
            maxDepth: total search depth in plies.
            depth: current ply (0 at the root).
            alpha, beta: current alpha-beta window.

        Returns:
            (bestMove, score, finalBoard) from the side-to-move's perspective;
            bestMove is '' at evaluated leaves.
        """
        alphaOriginal = alpha

        # Transposition-table probe: a stored entry searched at least as deep
        # as we need can cut off or narrow the window.
        zhash = zobrist_hash(board)
        entry = self._transTable.table.get(zhash)
        if entry and entry.depth >= maxDepth - depth:
            if entry.scoreType == self._transTable.EXACT_SCORE:
                self._transTable.hits = self._transTable.hits + 1
                return (entry.move, entry.score, entry.finalBoard)
            elif entry.scoreType == self._transTable.LOWER_BOUND_SCORE:
                alpha = max(alpha, entry.score)
            else:
                beta = min(beta, entry.score)
            if alpha >= beta:
                return (entry.move, entry.score, entry.finalBoard)

        # No usable entry: create one (caching the game result string).
        newEntry = False
        if not entry:
            entry = transTable.transTableEntry()
            entry.zobristHash = zhash
            newEntry = True
            entry.result = board.result()
        entry.depth = maxDepth - depth
        entry.move = None

        #result = board.result()
        # Leaf: depth limit reached or the game is decided ("*" = ongoing).
        if (depth == maxDepth or entry.result != "*"):
            entry.score = evaluator(board, entry.result)
            entry.finalBoard = board
            # Evict an arbitrary entry when the table is full before storing.
            if (self._transTable.size == self._transTable.maxSize
                    and newEntry):
                self._transTable.table.popitem()
                self._transTable.size = self._transTable.size - 1
            self._transTable.table[entry.zobristHash] = entry
            self._transTable.size = self._transTable.size + 1
            return ('', entry.score, board)

        maxScore = -(1 << 64)
        score = maxScore
        bestMove = None
        finalBoard = None

        # Negamax recursion: negate the child's score, swap/negate the window.
        for move in board.legal_moves:
            board.push(move)
            _, score, finalBoard = self._abNegamax(board, maxDepth, depth + 1,
                                                   -beta, -alpha)
            score = -score
            board.pop()

            if score > maxScore:
                maxScore = score
                bestMove = move

            alpha = max(alpha, score)
            if alpha >= beta:
                break

        # Classify and store the result: fail-low -> upper bound, fail-high ->
        # lower bound, otherwise exact.
        entry.score = maxScore
        entry.move = bestMove
        entry.finalBoard = finalBoard
        if maxScore <= alphaOriginal:
            entry.scoreType = self._transTable.UPPER_BOUND_SCORE
        elif maxScore >= beta:
            entry.scoreType = self._transTable.LOWER_BOUND_SCORE
        else:
            entry.scoreType = self._transTable.EXACT_SCORE
        if (self._transTable.size == self._transTable.maxSize and newEntry):
            self._transTable.table.popitem()
            self._transTable.size = self._transTable.size - 1
        self._transTable.table[entry.zobristHash] = entry
        self._transTable.size = self._transTable.size + 1
        return (bestMove, maxScore, finalBoard)
Пример #25
0
 # Fragment: load a fat-tree topology spec, build multicast trees for the
 # first 500 entries, then evaluate them. (The enclosing function/scope and
 # names like load_json_topology, switches, final_trees live outside this
 # snippet.)
 filejson = open("../topo/q_fattree_k_8_MINEDGE_SPT.json")
 #filejson = open("../topo/q_fattree_k_10_SIMPLE_SPT.json")
 topojson = json.load(filejson)
 entries = topojson['entries']
 # Entries are keyed by stringified integers; sort them numerically.
 entries = sorted(entries.items(), key=lambda x: int(x[0]))
 #for k in range(len(entries)):
 #	print entries[k][0] , ":" , entries[k][1]
 src_file = entries[0][1]['src_file']
 load_json_topology("../topo/" + src_file)
 indexer = 0
 shortest_paths_all()
 for k in range(len(entries)):
     #print entries[k][0]
     # Stop at entry id 501, i.e. process the first 500 trees only.
     if int(entries[k][0]) == 501:
         break
     entry = entries[k][1]
     source = entry['src_host']
     destination = entry['dest_hosts']
     algorithm1 = entry['algorithm']
     algorithm = algorithm1
     flow_rate = entry['flow_rate']
     #print "** Generating port sets for trees **"
     tree_no = int(entries[k][0])
     count = tree_ports_all(tree_no, source, destination, algorithm,
                            flow_rate)
 #for entry in final_trees:
 #	print entry
 # Score the generated trees and persist them.
 evaluator_obj = evaluator(switches, hosts, dpids)
 evaluator_obj.save_final_trees(final_trees, final_trees_temp)
 evaluator_obj.evaluate(final_trees, "avalanche")
 show_final_trees()
Пример #26
0
def main():
    """Train a TCC model on the configured dataset and write logs/plots/snapshots."""
    # chainer.config.autotune = True
    # chainer.config.cudnn_fast_batch_normalization = True

    print("dataset", CONFIG.dataset)
    print("output_dir:", output_dir)

    # Select and split the raw sequences for the configured dataset.
    if CONFIG.dataset == "tennis_serve":
        all_seqs = load_penn_action(dataset_dir=CONFIG.dataset_path,
                                    stride=CONFIG.penn_action.stride,
                                    dict_ok=False)
        train_seqs, test_seqs = all_seqs[:115], all_seqs[115:]
    elif CONFIG.dataset == "pouring":
        train_seqs, test_seqs = load_pouring(dataset_dir=CONFIG.dataset_path,
                                             stride=CONFIG.pouring.stride,
                                             dict_ok=False)
    elif CONFIG.dataset == "multiview_pouring":
        train_seqs, test_seqs = load_multiview_pouring(
            dataset_dir=CONFIG.dataset_path,
            stride=CONFIG.multiview_pouring.stride,
            dict_ok=False)
    else:
        print("dataset error.")
        exit()

    # Wrap the raw sequences into training-ready datasets.
    train_seqs = load_dataset(train_seqs,
                              augment=None,
                              img_size=CONFIG.img_size,
                              k=CONFIG.k)
    test_seqs = load_dataset(test_seqs,
                             augment=None,
                             img_size=CONFIG.img_size,
                             k=CONFIG.k)

    train_iter = MultiprocessIterator(train_seqs,
                                      batch_size=CONFIG.batchsize,
                                      n_processes=6)
    # Evaluation iterates the test set once, one sample at a time.
    test_iter = MultiprocessIterator(test_seqs,
                                     batch_size=1,
                                     n_processes=6,
                                     repeat=False,
                                     shuffle=None)

    # Build the model and place it on the requested device.
    model = tcc(use_bn=True, k=CONFIG.k)
    device = chainer.get_device(OPTION.device)
    device.use()
    model.to_device(device)

    optimizer = make_optimizer(model)

    if CONFIG.weight_decay_rate != 0:
        # Attach weight decay to every parameter's update rule.
        for param in model.params():
            param.update_rule.add_hook(WeightDecay(CONFIG.weight_decay_rate))

    updater = tcc_updater({"main": train_iter}, optimizer, device)

    trainer = Trainer(updater, (CONFIG.iteration, 'iteration'), out=output_dir)

    # Reporting and evaluation both fire every 100 iterations.
    report_trigger = (100, 'iteration')
    plot_trigger = (100, 'iteration')
    trainer.extend(extensions.ProgressBar(update_interval=5))
    trainer.extend(
        extensions.LogReport(trigger=report_trigger, filename='log.txt'))
    trainer.extend(
        extensions.PrintReport(
            ["iteration", "main/loss", "test/loss", "test/tau",
             "elapsed_time"]),
        trigger=report_trigger)

    trainer.extend(
        extensions.PlotReport(["main/loss", "test/loss"],
                              "iteration",
                              file_name="loss.png"),
        trigger=plot_trigger)

    trainer.extend(
        evaluator(test_iter,
                  model,
                  device,
                  epoch=plot_trigger[0],
                  out=output_dir),
        trigger=plot_trigger)
    trainer.extend(
        extensions.PlotReport(["test/tau"],
                              "iteration",
                              file_name="tau.png"),
        trigger=plot_trigger)

    # Snapshot the model weights alongside the plots.
    trainer.extend(
        extensions.snapshot_object(model, "{.updater.iteration}" + ".npz"),
        trigger=plot_trigger)

    trainer.run()
Пример #27
0
    def train(self, epochs, batch_size=10, sample_interval=50):
        """Run the two-stage (denoise + super-resolve) adversarial training loop.

        Args:
            epochs: number of full passes over self.data_loader.
            batch_size: samples per batch.
            sample_interval: NOTE(review): this parameter is never used —
                model saving / evaluation below is hard-coded to every 20
                batches; presumably it was meant to control that interval.
        """
        #every sample_interval batches, the model is saved and sample images are generated and saaved
        
        start_time = datetime.datetime.now()
        def chop_microseconds(delta):
            #utility to help avoid printing the microseconds
            return delta - datetime.timedelta(microseconds=delta.microseconds)

        """ Adversarial loss ground truths for patchGAN discriminators"""
        
        # Calculate output shape of the patchGAN discriminators based on the target shape
        len_x = self.img_shape[0]
        len_y = self.img_shape[1]
        self.D1_out_shape = (len_x, len_y, 1)
        #define the adversarial ground truths for D1
        valid_D1 = np.ones((batch_size,) + self.D1_out_shape)
        fake_D1 = np.zeros((batch_size,) + self.D1_out_shape)
        print("valid D1: ", valid_D1.shape)
        #similarly for D2
        len_x = self.target_res[0]
        len_y = self.target_res[1]
        self.D2_out_shape = (len_x, len_y, 1)
        
        valid_D2 = np.ones((batch_size,) + self.D2_out_shape)
        fake_D2 = np.zeros((batch_size,) + self.D2_out_shape)
        
        #define an evaluator object to monitor the progress of the training
        dynamic_evaluator = evaluator(img_res=self.img_shape, SRscale = self.SRscale)
        for epoch in range(epochs):
            for batch, (img_x, img_y, img_Y) in enumerate(self.data_loader.load_batch(batch_size)):

                # Update the discriminators 

                # Make the appropriate generator translations for discriminator training
                fake_y = self.G1.predict(img_x) #translate x to denoised x
                fake_Y = self.SR.predict(fake_y) #translate denoised x to super-resolved x
                
                # Train the discriminators (original images = real / translated = fake)
                #we will need different adversarial ground truths for the discriminators
                D1_loss_real = self.D1.train_on_batch(img_y, valid_D1)
                D1_loss_fake = self.D1.train_on_batch(fake_y, fake_D1)
                D1_loss = 0.5 * np.add(D1_loss_real, D1_loss_fake)
                
                D2_loss_real = self.D2.train_on_batch(img_Y, valid_D2)
                D2_loss_fake = self.D2.train_on_batch(fake_Y, fake_D2)
                D2_loss = 0.5 * np.add(D2_loss_real, D2_loss_fake)
                
                # ------------------
                #  Train Generators
                # ------------------
                
                blur_img_x = self.blur.predict(img_x) #passes img_x through the blurring kernel to provide GT.
                
                # Train the combined models (all generators basically)
                cyclic_loss_1 = self.cyclic1.train_on_batch([img_x], [valid_D1, img_x, blur_img_x, fake_y])
                cyclic_loss_2 = self.cyclic2.train_on_batch([fake_y], [valid_D2, img_x, fake_Y])
                
                """update log values"""
                #save the training point (measured in epochs)
                self.training.append(round(epoch+batch/self.data_loader.n_batches, 3))
                #adversarial losses
                self.D1_loss.append(D1_loss[0])
                self.D2_loss.append(D2_loss[0])
                self.G1_adv.append(cyclic_loss_1[1])
                self.SR_adv.append(cyclic_loss_2[1])
                
                #1cycleGAN losses
                self.cyc1.append(cyclic_loss_1[2])
                self.blur1.append(cyclic_loss_1[3])
                self.tv1.append(cyclic_loss_1[4])
                
                #2nd cycleGan losses
                self.cyc2.append(cyclic_loss_2[2])
                self.tv2.append(cyclic_loss_2[3])
        
                
                elapsed_time = datetime.datetime.now() - start_time
                elapsed_time = chop_microseconds(elapsed_time)
                print("[elapsed time: %s][Epoch %d/%d] [Batch %d/%d] -- [D1_adv: %.3f D2_adv: %.3f] -- [G1_adv: %.3f - SR_adv: %.3f - cyc1: %.4f - cyc2: %.4f]" % (elapsed_time, epoch, epochs,
                      batch, self.data_loader.n_batches, D1_loss[0], D2_loss[0], cyclic_loss_1[1], cyclic_loss_2[1], cyclic_loss_1[2], cyclic_loss_2[2]))
                
                # Checkpoint + evaluate every 20 batches (skipping the very
                # first batch of the very first epoch).
                if batch % 20 == 0 and not(batch == 0 and epoch == 0):
                    """save the model"""
                    model_name="{}_{}.h5".format(epoch, batch)
                    self.SR.save("models/"+model_name)
                    print("Epoch: {} --- Batch: {} ---- saved".format(epoch, batch))
                    
                    dynamic_evaluator.model = [self.G1, self.SR]
                    dynamic_evaluator.epoch = epoch
                    dynamic_evaluator.batch = batch
                    dynamic_evaluator.perceptual_test(5)
                    
                    sample_mean_ssim = dynamic_evaluator.objective_test(batch_size=250)
                    print("Sample mean SSIM: -------------------  %05f   -------------------" % (sample_mean_ssim))
                    self.ssim_eval_time.append(round(epoch+batch/self.data_loader.n_batches, 3))
                    self.ssim.append(sample_mean_ssim)
                    
                    self.log()
Пример #28
0
     flowgraph.start()
     # raw_input indicates this script targets Python 2 (GNU Radio 3.7-era).
     try:
         raw_input('\nPress Enter after the USRP has finished processing '
                   '(red light goes out). Unfortunately, we currently have '
                   'no way to have the flowgraph automatically finish after all '
                   'bits have been transmitted (see https://lists.gnu.org/'
                   'archive/html/discuss-gnuradio/2016-07/msg00319.html):\n ')
     except EOFError:
         # Non-interactive stdin (e.g. piped input): proceed without waiting.
         pass
     flowgraph.stop()
     flowgraph.wait()
 elif args.evaluator:
     print("Running evaluation flowgraph")
     # Merge shared params over the evaluator-specific ones (shared wins).
     evaluator_params = config['evaluator_params'].copy()
     evaluator_params.update(config['shared_params'])
     flowgraph = evaluator(**evaluator_params)
     flowgraph.start()
     try:
         raw_input('Press Enter after all files have been read by the folder source.\n')
     except EOFError:
         pass
     flowgraph.stop()
     flowgraph.wait()
     # Optionally persist statistics and plots gathered by the PRBS sink.
     if 'report_output_file' in config and config['report_output_file']:
         print("Saving statistics to {}".format(config['report_output_file']))
         generate_report(config['report_output_file'], config, flowgraph.starcoder_utils_prbs_sink_pdu_0.statistics)
     if 'plot_output_file' in config and config['plot_output_file']:
         print("Saving output plot to {}".format(config['plot_output_file']))
         plot_collected_packets(flowgraph.starcoder_utils_prbs_sink_pdu_0.collected_packets, config['plot_output_file'])
 elif args.filegen:
     print("Running IQ file")
Пример #29
0
 def make_evaluator(self):
     """Build and return a new evaluator from this object's names and weights."""
     names = self.__names
     weights = self.__weights
     return evaluator(names, weights)
Пример #30
0
    def train(self, epochs, batch_size=1, sample_interval=50):
        """Run the single-generator adversarial SR training loop.

        Args:
            epochs: number of full passes over self.data_loader.
            batch_size: samples per batch.
            sample_interval: every this many batches, run perceptual and
                SSIM evaluation and save the generator to models/.
        """
        #every sample_interval batches, the model is saved and sample images are generated and saved

        start_time = datetime.datetime.now()

        # Adversarial loss ground truths
        valid = np.ones((batch_size, ) + self.disc_patch)
        print("valid shape:{}".format(valid.shape))
        fake = np.zeros((batch_size, ) + self.disc_patch)

        # Evaluator used to monitor training progress at each sample interval.
        dynamic_evaluator = evaluator(img_res=self.img_shape,
                                      SRscale=self.SRscale)
        for epoch in range(epochs):
            for batch_i, (imgs_A, imgs_B) in enumerate(
                    self.data_loader.load_batch(batch_size)):

                # ----------------------
                #  Train Discriminators
                # ----------------------

                # Translate images to opposite domain
                fake_B = self.G.predict(imgs_A)
                # VGG feature maps used as perceptual targets for the generator.
                imgs_A_vgg = self.block2_conv1_LR.predict(imgs_A)
                imgs_B_vgg = self.block2_conv1_SR.predict(imgs_B)

                # Train the discriminators (original images = real / translated = Fake)
                d_loss_real = self.D2.train_on_batch(imgs_B, valid)
                d_loss_fake = self.D2.train_on_batch(fake_B, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
                self.log_D_loss.append(d_loss[0])

                # ------------------
                #  Train Generators
                # ------------------

                # Train the generators
                g_loss = self.combined.train_on_batch(
                    [imgs_A, imgs_B],
                    [valid, imgs_A_vgg, imgs_B_vgg, imgs_B_vgg, imgs_B])
                elapsed_time = datetime.datetime.now() - start_time

                # Fractional epoch index used as the x-axis for all logs.
                training_time_point = epoch + batch_i / self.data_loader.n_batches
                self.log_TrainingPoints.append(
                    np.around(training_time_point, 3))
                self.log_G_loss.append(g_loss[1])
                self.log_ReconstructionLoss.append(np.mean(g_loss[2:4]))
                self.log_ID_loss.append(g_loss[4])
                self.log_TotalVariation.append(g_loss[5])

                # Plot the progress
                print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, ID: %05f, TV: %05f] time: %s " \
                                                                        % ( epoch, epochs,
                                                                            batch_i, self.data_loader.n_batches,
                                                                            d_loss[0], 100*d_loss[1],
                                                                            g_loss[0],
                                                                            g_loss[1],
                                                                            np.mean(g_loss[2:4]),
                                                                            g_loss[4],
                                                                            g_loss[5],
                                                                            elapsed_time))

                # If at save interval => save generated image samples
                if batch_i % sample_interval == 0:
                    print("Epoch: {} --- Batch: {} ---- saved".format(
                        epoch, batch_i))
                    dynamic_evaluator.model = self.G
                    dynamic_evaluator.epoch = epoch
                    dynamic_evaluator.batch = batch_i
                    dynamic_evaluator.perceptual_test(5)

                    sample_mean_ssim = dynamic_evaluator.objective_test(
                        batch_size=800)
                    print(
                        "Sample mean SSIM: -------------------  %05f   -------------------"
                        % (sample_mean_ssim))
                    self.log_sample_ssim_time_point.append(
                        np.around(training_time_point, 3))
                    self.log_sample_ssim.append(sample_mean_ssim)
                    #self.sample_images(epoch, batch_i)
                    self.G.save("models/{}_{}.h5".format(epoch, batch_i))
                    self.logger()
Пример #31
0
    preprocessor.transform_test_sent_fsoftmax_v2(config)

    print("pairing data...")

    preprocessor.pair_train_sent_fsoftmax(config)
    preprocessor.pair_valid_sent_fsoftmax(config)
    preprocessor.pair_test_sent_fsoftmax(config)

    
    
    print("training model...")

    import trainer_hier_fsoftmax_v2
    trainer_hier_fsoftmax_v2.trainer(config)

    

    import decoder_hier_fsoftmax_v2
    decoder_hier_fsoftmax_v2.decoder(config)
    '''

    import evaluator
    evaluator.evaluator(config)


    import read_kp_kp20k
    read_kp_kp20k.reader(config)



Пример #32
0
            dataset,
            batch_size=cfg.TRAIN.BATCH_SIZE,
            drop_last=True,
            shuffle=bshuffle,
            num_workers=int(cfg.WORKERS))

        # Define models and go to training
        algo = trainer(output_dir, dataloader, dataset)
        algo.train()
    else:
        # Evaluation path: build the test dataset and its loader.
        dataset = TestDataset(cfg.DATA_DIR,
                              split_dir,
                              base_size=cfg.TREE.BASE_SIZE)
        assert dataset
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=cfg.TRAIN.BATch_SIZE,
            drop_last=True,
            shuffle=bshuffle,
            num_workers=int(cfg.WORKERS))

        # Define models and go to evaluating
        algo = evaluator(output_dir, dataloader, dataset)

        # Cache Inception activations for FID if not already present.
        # NOTE(review): acts_dict appears to hold precomputed FID
        # activations keyed by split — confirm against load_acts_data.
        if dataset.acts_dict is None:
            algo.dump_fid_acts(cfg.DATA_DIR, split_dir)
            dataset.acts_dict = load_acts_data(cfg.DATA_DIR, split_dir)

        algo.evaluate(split_dir, dataset.imsize)
    end_t = time.time()
    print('Total time for {0}:'.format(split_dir), end_t - start_t)