Example #1
 def __init__(self, evaluator=None, tag='f', scale=1.0, variables=None):
     
     Evaluator.__init__(self)
     
     self.evaluator = evaluator
     self.tag       = tag
     self.scale     = scale
     self.variables = variables
Example #2
 def __init__( self, evaluator=None, 
               tag='c', sense='=', edge=0.0, 
               scale=1.0,
               variables=None):
     
     Evaluator.__init__(self)
     
     self.evaluator = evaluator
     self.tag       = tag
     self.sense     = sense
     self.edge      = edge
     self.scale     = scale
     self.variables = variables
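
Examples #1 and #2 are constructor fragments for thin wrappers around a raw evaluator: the objective flavor keeps a tag and a scale, while the constraint flavor adds a sense ('=', '<', '>') and an edge value. A minimal, self-contained sketch of how such a wrapper could be wired up (the base class and the __call__ contract here are assumptions, not the project's actual code):

class Evaluator:
    def __init__(self):
        self.cache = {}  # assumption: the real base class memoizes evaluations

class ScaledObjective(Evaluator):
    def __init__(self, evaluator=None, tag='f', scale=1.0, variables=None):
        Evaluator.__init__(self)
        self.evaluator = evaluator
        self.tag = tag
        self.scale = scale
        self.variables = variables

    def __call__(self, x):
        # assumed contract: delegate to the wrapped callable, then apply the scale
        return self.scale * self.evaluator(x)

obj = ScaledObjective(evaluator=lambda x: x ** 2, scale=0.5)
print(obj(4))  # 8.0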
Example #3
def evaluate(goldfile, autofile, joint, model):
    print("goldfile -- ", goldfile)
    print("autofile -- ", autofile)
    f = codecs.open(goldfile, "r", "utf-8")
    gold = f.readlines()
    f.close()
    f = codecs.open(autofile, "r", "utf-8")
    auto = f.readlines()
    f.close()

    goldTags, autoTags, idTags = [], [], []  # avoid NameError for unimplemented models
    if model == "maxent":
        goldTags = [item.split()[1] for item in gold if item.split() != []]
        autoTags = [chooseMax(item.split()[1:]) for item in auto if item.split() != []]
        idTags = [item.split()[0] for item in auto if item.split() != []]
        print("len of gold tags --", len(goldTags), "len of auto tags --", len(autoTags))
    elif model == "crfpp":
        pass  # not implemented
    elif model == "crfsuite":
        pass  # not implemented

    if joint == "joint":
        print("\nfirst tag in joint tags --")
        evaluator = Evaluator([tag.split("_")[0] for tag in goldTags], [tag.split("_")[0] for tag in autoTags], gold)
        correct, total = evaluator.evaluate()
        print("\nsecond tag in joint tags --")
        evaluator = Evaluator([tag.split("_")[1] for tag in goldTags], [tag.split("_")[1] for tag in autoTags], gold)
        correct, total = evaluator.evaluate()
    else:
        evaluator = Evaluator(goldTags, autoTags, gold)
        correct, total = evaluator.evaluate()
    return correct, total, zip(idTags, autoTags)
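
chooseMax is not shown in this snippet; a hedged sketch of what it presumably does, assuming each auto-file column after the token id is a 'tag:probability' pair (the column format is an assumption):

def chooseMax(candidates):
    # hypothetical helper matching the call above: return the highest-scoring tag
    best_tag, best_score = None, float('-inf')
    for cand in candidates:
        tag, score = cand.rsplit(':', 1)
        if float(score) > best_score:
            best_tag, best_score = tag, float(score)
    return best_tag

print(chooseMax(["NN:0.12", "VB:0.80", "JJ:0.08"]))  # VB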
Example #4
	def __init__(self, visitor, obj=None, config=None, type=TYPE_TEXT):
		"""__init__() - Initializes the driver
		"""
		self.__gobject_init__()
		self.visitor = visitor
		if obj is None:
			self.layouts = {}
			self.widgets = {}
			self.static_widgets = []
			self.fake = False
			self.ser = None
			self.display_name = ''
			self.serial_number = ''
		else:
			self.layouts = obj.layouts
			self.fake = obj.fake
			self.ser = obj.ser
			self.display_name = obj.display_name
			self.device_name = obj.device_name
			self.path = obj.path
			self.books = obj.books
			self.current_command = obj.current_command
			self.command_queue0 = obj.command_queue0
			self.command_queue1 = obj.command_queue1
			self.response_time_init = time.time()
		self.TYPE = type
		self.current_layout = None
		self.app = visitor
		#self.debug = visitor.debug
		self.layoutG = self.GetLayout()
		self.current_incr = 0
		self.layout_id = None
		self.transition_id = None
		CFG.__init__(self, config)
		Evaluator.__init__(self)
		self.connect("layout-transition-finished", self.StartLayout)
		self.AddFunction("transition", 1, self.my_transition)
Example #5
    ml.actualizarRatings()
    print("Loading movie ratings...")
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

contentKNN = ContentKNNAlgorithm()
evaluator.AddAlgorithm(contentKNN, "ContentKNN")

# Just make random recommendations
#Random = NormalPredictor()
#evaluator.AddAlgorithm(Random, "Random")
#evaluator.Evaluate(True)

recommendations = evaluator.globalRecommendation()
print("get_top_n")
evaluator.get_top_n(recommendations)
#evaluator.SampleTopNRecs(ml,268,10)
Example #6
import numpy as np
from GA.Initializer import Heuristic_Initializer
from GA.Selector import Roulette_Selector
from GA.Recombiner import Recombiner
from GA.Mutator import Route_Mutator
from Task_Initializer import Task
from Evaluator import Evaluator
from copy import deepcopy

task = Task()
init = Heuristic_Initializer()
pop = init.initialize_pop(task,10)

evaluator = Evaluator()
pop = evaluator.evaluate(pop,task)

ind = np.random.choice(pop)
customers_before = [customer for route in ind['solution'].values() for customer in route]
assert len(customers_before) == len(np.unique(customers_before))

mutator = Route_Mutator()
pop = mutator.mutate(pop,0.8)

ind = np.random.choice(pop)

customers_num = 0
for route in ind['solution'].values():
    customers_num += len(route)

customers = [customer for route in ind['solution'].values() for customer in route]
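
The fragment cuts off just before the check it is building toward; presumably it repeats the pre-mutation invariant that mutation neither drops nor duplicates customers:

# assumed continuation of the fragment above
assert customers_num == len(customers)
assert len(customers) == len(np.unique(customers))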
Example #7
def LoadMovieLensData():
    ml = MovieLens()
    print("Loading movie ratings...")
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

contentKNN = ContentKNNAlgorithm()
evaluator.AddAlgorithm(contentKNN, "ContentKNN")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

evaluator.Evaluate(False)

evaluator.SampleTopNRecs(ml)
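
Several of these scripts share one Evaluator interface: construct it with rating data and popularity rankings, register algorithms by name, then run the comparison. A hedged skeleton of that interface (method names come from the calls above; the bodies are assumptions for illustration):

class EvaluatorSketch:
    def __init__(self, dataset, rankings):
        self.dataset = dataset    # rating data to split and score against
        self.rankings = rankings  # popularity ranks, used for novelty metrics
        self.algorithms = []

    def AddAlgorithm(self, algorithm, name):
        self.algorithms.append((name, algorithm))

    def Evaluate(self, doTopN):
        # assumption: fit each algorithm on a train split, report accuracy
        # metrics, and add top-N hit-rate metrics when doTopN is True
        for name, algo in self.algorithms:
            print("Evaluating", name, "with top-N metrics" if doTopN else "accuracy only")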
Example #8
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load the data
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to compare them
evaluator = Evaluator(evaluationData, rankings)

# SVD
svd = SVD()
evaluator.AddAlgorithm(svd, "SVD")

# SVD++
svdPP = SVDpp()
evaluator.AddAlgorithm(svdPP, "SVD++")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

# Fight!
evaluator.Evaluate(False)
Example #9
    print("Loading movie ratings...")
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

#Autoencoder
AutoRec = AutoRecAlgorithm()
evaluator.AddAlgorithm(AutoRec, "AutoRec")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

# Fight!
evaluator.Evaluate(True)

evaluator.SampleTopNRecs(ml)
Example #10
class AiMove():
    layer_tree = 3
    best_state = 0
    best_utility = 0
    which_finger = 0

    def predictMove(self, game_state: GameState):
        self.eval = Evaluator()
        probability_move = [game_state]

        utilityValue = []
        tr2 = []
        tr = probability_move
        if self.eval.evaluate(probability_move[0],1) == 10000:
            print("AI WINNING")
            return -1, 10000, None

        ##print(" PROB -------")
        tr[0].print()
        # ##print(" PROB 2-------")
        # First ply: expand all AI moves and score each resulting state
        tr, _, _ = self.tapToAll(tr[0])
        for i in range(len(tr)):
            utilityValue.append(self.eval.evaluate(tr[i], 1))

        # #print("BEST ",AiMove.best_utility, "Finger" ,AiMove.which_finger,"\tstate ", end='')
        # AiMove.best_state.#print()
        # tr[1].#print()
        #print("PERTAMA " ,utilityValue)

        # Second ply: expand each reply and keep the opponent's best response
        for i in range(len(tr)):
            tr2, be_util, be_state = self.tapToAll(tr[i])
            if utilityValue[i] > be_util:
                utilityValue[i] = be_util

        best_finger = 0
        best_util = 0
        best_state = None
        for i in range(len(utilityValue)):
            if i == 0:
                best_util = utilityValue[i]
            else:
                if utilityValue[i] > best_util:
                    best_util = utilityValue[i]
                    best_finger = i

        best_state = tr[best_finger]
        # Move indices: 0 = AI left hand to player's left, 1 = AI left to player's right,
        # 2 = AI right to player's left, 3 = AI right to player's right, 4 = AI divide
        return best_finger, best_util, best_state


    def tapToAll(self, game_state: GameState):
        probability_move = []
        #print(" PROB 2-------")
        player_left = game_state.values[0][0]
        player_right = game_state.values[0][1]

        ai_left = game_state.values[1][0]
        ai_right = game_state.values[1][1]

        blocked_finger = [0, 0, 0, 0, 0]
        blocked_finger_pl = [0, 0, 0, 0, 0]

        if player_left == 0:
            blocked_finger_pl[0] = 1
            blocked_finger_pl[1] = 1
        if player_right == 0:
            blocked_finger_pl[2] = 1
            blocked_finger_pl[3] = 1
        if ai_left == 0:
            blocked_finger[0] = 1
            blocked_finger[1] = 1
        if ai_right == 0:
            blocked_finger[2] = 1
            blocked_finger[3] = 1

        if game_state.player == 1:  # AI's turn
            probability_move.append(
                GameState(0, (player_left + ai_left) % 5, player_right, ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_left) % 5, ai_left, ai_right))
            probability_move.append(
                GameState(0, (player_left + ai_right) % 5, player_right, ai_left, ai_right))
            probability_move.append(
                GameState(0, player_left, (player_right + ai_right) % 5, ai_left, ai_right))
            if (ai_left + ai_right) % 2 == 0:
                probability_move.append(
                    GameState(0, player_left, player_right, int((ai_left + ai_right)/2), int((ai_left + ai_right)/2)))
        else:  # player's turn
            probability_move.append(
                GameState(1, player_left, player_right, (player_left + ai_left) % 5,ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left, (player_left + ai_right) % 5))
            probability_move.append(
                GameState(1, player_left, player_right, (player_right + ai_left) % 5, ai_right))
            probability_move.append(
                GameState(1, player_left, player_right, ai_left, (player_right + ai_right) % 5))
            if (player_left + player_right) % 2 == 0:
                probability_move.append(
                    GameState(1, int((player_left + player_right)/2), int((player_left + player_right)/2), ai_left, ai_right))
        # Score every candidate move, tracking the best for whichever side just moved
        for i in range(len(probability_move)):
            rating = self.eval.evaluate(probability_move[i], 1)
            if probability_move[i].player == 0:
                # the AI just moved: maximize the rating over unblocked moves
                if i == 0:
                    AiMove.best_utility = -11111
                if rating > AiMove.best_utility and (blocked_finger[i] != 1 and blocked_finger_pl[i] != 1):
                    AiMove.best_utility = rating
                    AiMove.best_state = probability_move[i]
                    AiMove.which_finger = i
            else:
                # the player just moved: minimize the rating over unblocked moves
                if i == 0:
                    AiMove.best_utility = 11111
                if rating < AiMove.best_utility and (blocked_finger_pl[i] != 1 and blocked_finger[i] != 1):
                    AiMove.best_utility = rating
                    AiMove.best_state = probability_move[i]
                    AiMove.which_finger = i
            probability_move[i].print()

        return probability_move, AiMove.best_utility, AiMove.best_state
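
Hypothetical usage of the class above, inferring the GameState constructor from the calls inside tapToAll (GameState(player, player_left, player_right, ai_left, ai_right), where player == 1 means the AI moves next):

state = GameState(1, 1, 1, 1, 1)  # fresh chopsticks game: one finger up per hand
finger, utility, next_state = AiMove().predictMove(state)
print("AI plays move", finger, "with utility", utility)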
Example #11
class Pipeline(object):
    def __init__(self, trainFilePath, valFilePath, retrievalInstance,
                 featurizerInstance, classifierInstance, resultsPATH):
        self.retrievalInstance = retrievalInstance
        self.featurizerInstance = featurizerInstance
        self.classifierInstance = classifierInstance
        trainfile = open(trainFilePath, 'r')
        self.trainData = json.load(trainfile)
        trainfile.close()
        valfile = open(valFilePath, 'r')
        self.valData = json.load(valfile)
        valfile.close()
        self.PATH = resultsPATH
        self.question_answering()

    def makeXY(self, dataQuestions):
        X = []
        Y = []
        for question in dataQuestions:

            long_snippets = self.retrievalInstance.getLongSnippets(question)
            short_snippets = self.retrievalInstance.getShortSnippets(question)

            X.append(short_snippets)
            Y.append(question['answers'][0])

        return X, Y

    def question_answering(self):
        print('Loading data...')
        dataset_type = self.trainData['origin']
        candidate_answers = self.trainData['candidates']
        X_train, Y_train = self.makeXY(
            self.trainData['questions'][0:30000])  # 31049 questions
        X_val, Y_val_true = self.makeXY(self.valData['questions'])

        # featurization
        print('Feature Extraction...')
        X_features_train, X_features_val = self.featurizerInstance.getFeatureRepresentation(
            X_train, X_val)
        self.clf = self.classifierInstance.buildClassifier(
            X_features_train, Y_train)

        # Prediction
        print('Prediction...')
        Y_val_pred = self.clf.predict(X_features_val)

        self.evaluatorInstance = Evaluator()
        a = self.evaluatorInstance.getAccuracy(Y_val_true, Y_val_pred)
        p, r, f = self.evaluatorInstance.getPRF(Y_val_true, Y_val_pred)

        print("Accuracy: " + str(a))
        print("Precision: " + str(p))
        print("Recall: " + str(r))
        print("F-measure: " + str(f))

        # Correctly answered questions
        # correct_questions_indices = np.where(np.equal(Y_val_pred, Y_val_true))
        # correct_questions = X_val[correct_questions_indices]

        # Save predictions in json
        results = {
            'feature': self.featurizerInstance.__class__.__name__,
            'classifier': self.classifierInstance.__class__.__name__,
            'training size': len(X_train),
            'accuracy': a,
            'precision': p,
            'recall': r,
            'F-measure': f,
            'predictions': Y_val_pred.tolist()
        }
        file = open(os.path.join(
            self.PATH, self.featurizerInstance.__class__.__name__ +
            self.classifierInstance.__class__.__name__),
                    'w',
                    encoding='utf-8')
        json.dump(results, file, ensure_ascii=False)
Example #12
#
# Min Lee
# [email protected]
# MacOS
# Python
#
# In accordance with the class policies and Georgetown's Honor Code,
# I certify that, with the exceptions of the class resources and those
# items noted below, I have neither given nor received any assistance
# on this project.
#

import sys
from Classifier import knn
from Evaluator import Evaluator

classifier = knn(sys.argv)
evaluator = Evaluator(sys.argv)
performance = evaluator.evaluate(classifier, sys.argv)

print(performance)
Example #13
            tips.extend(res.get('items'))
        return [
            Photo(self.make_link(tip['prefix'], tip['suffix']),
                  tip.get('createdAt')) for tip in tips
        ]

    def make_link(self, prefix, suffix):
        """
        Input:
        'prefix': 'https://igx.4sqi.net/img/user/',
        'suffix': '/13893908-H3NB1YDQ4ZKX3CGI.jpg',
        to link:
        'https://igx.4sqi.net/img/user/13893908-H3NB1YDQ4ZKX3CGI.jpg'
        :param prefix: URL prefix of the photo
        :param suffix: photo filename suffix
        :return: link to a photo
        """
        return prefix + '500x500' + suffix


fp = FProvider()
places = fp.get_venues_near(Point(59.9538695, 30.2659853), 20000)
e = Evaluator()
marks = [e.evaluate_place(p) for p in places[:5]]
for m, p in zip(marks, places):
    print(p)
    print(m)
    print()

# ia = ImageAnalytics()
# i = ia._load_photo("https://igx.4sqi.net/img/general/500x500/13893908_t7OjS4DdVPAV0gMJL5N6g_qM2UEUZUFndo5uHDtdVD0.jpg")
# print(i)
Example #14
class llvmMultiobjetiveProblem(IntegerProblem):

    def __init__(self, max_epochs: int = 500, filename: str = None, solution_length: int = 100,
                 population_size: int = None, offspring_population_size: int = None,
                 verbose: bool = True, upper_bound: int = 86):

        self.llvm = LlvmUtils(llvmpath='/usr/bin/', clangexe='clang-10', optexe='opt-10', llcexe='llc-10')
        self.llvmfiles = LlvmFiles(basepath='./', source_bc='polybench_small/polybench_small_original.bc', 
                                jobid=f'{population_size}_{offspring_population_size}_{solution_length}')
        self.evaluator = Evaluator(runs=0)
        self.number_of_variables = solution_length
        self.lower_bound = [0 for _ in range(self.number_of_variables)]
        self.upper_bound = [upper_bound for _ in range(self.number_of_variables)]
        self.obj_labels = ['codelines', 'tags', 'jumps', 'function_tags', 'calls']
        self.obj_directions = [self.MAXIMIZE, self.MINIMIZE, self.MINIMIZE, self.MINIMIZE, self.MINIMIZE]
        self.number_of_objectives = 5
        self.number_of_constraints = 0
        self.max_epochs = max_epochs
        self.evaluations = 0
        self.epoch = 1
        self.phenotype = 0
        self.population_size = population_size
        self.offspring_population_size = offspring_population_size
        self.dictionary = dict()
        self.verbose = verbose
        self.preloaded_dictionary = f"{self.number_of_variables}_dictionary.data"
        if os.path.exists(self.preloaded_dictionary):
            with open(self.preloaded_dictionary,"r") as file:
                print(f"reading '{self.preloaded_dictionary}'...")
                for line in file.readlines():
                    line = line[:-1]  # strip the trailing newline
                    keyvalue = line.split(sep=";")
                    # parse the stored objective list back into numbers (needs: import ast)
                    self.dictionary.update({keyvalue[0]: ast.literal_eval(keyvalue[1])})

    def get_name(self):
        return 'Llvm Multiobjective Problem'

    def config_to_str(self):
        return f"{self.population_size}_{self.offspring_population_size}_{self.number_of_variables}_{self.max_epochs}"

    def evaluate(self, solution: IntegerSolution) -> IntegerSolution:
        self.phenotype += 1
        limit = self.offspring_population_size if self.epoch != 1 else self.population_size
        if self.phenotype % (limit + 1) == 0:
            self.epoch += 1
            self.phenotype = 1
        key = f"{solution.variables}"
        value = self.dictionary.get(key)
        if value is None:
            # Decoding
            passes = ""
            for i in range(self.number_of_variables):
                passes += f" {self.llvm.get_passes()[solution.variables[i]]}"

            # Optimize and generate resources
            self.llvm.toIR(self.llvmfiles.get_original_bc(), self.llvmfiles.get_optimized_bc(), passes=passes)
            self.llvm.toExecutable(self.llvmfiles.get_optimized_bc(), self.llvmfiles.get_optimized_exe())
            self.llvm.toAssembly(self.llvmfiles.get_optimized_bc(), self.llvmfiles.get_optimized_ll())

            # Get measures
            self.evaluator.evaluate(source_ll=self.llvmfiles.get_optimized_ll(), source_exe=self.llvmfiles.get_optimized_exe())
            solution.objectives[0] = self.evaluator.get_codelines()
            solution.objectives[1] = self.evaluator.get_tags()
            solution.objectives[2] = self.evaluator.get_total_jmps()
            solution.objectives[3] = self.evaluator.get_function_tags()
            solution.objectives[4] = self.evaluator.get_calls()
            self.dictionary.update({key: solution.objectives})
            self.evaluator.reset()
        else:
            # Get stored value
            solution.objectives[0] = value[0]
            solution.objectives[1] = value[1]
            solution.objectives[2] = value[2]
            solution.objectives[3] = value[3]
            solution.objectives[4] = value[4]
        
        if self.verbose:
            print("evaluated solution {:3} from epoch {:3} : variables={}, fitness={}"\
                .format(self.phenotype,self.epoch,solution.variables,solution.objectives))
        return solution

    ### FOR TERMINATION CRITERION ###
    def update(self, *args, **kwargs):
        self.evaluations = kwargs['EVALUATIONS']

    ### FOR TERMINATION CRITERION ###
    @property
    def is_met(self):
        met = self.epoch >= self.max_epochs
        if self.phenotype*self.epoch % 100 == 0 or met:
            with open(self.preloaded_dictionary, "w") as file:
                for keys,values in self.dictionary.items():
                    file.write('{};{}\n'.format(keys,values))
        return met
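
evaluate() memoizes objective vectors in a plain 'key;value' text file. A minimal sketch of that round-trip, showing why a loaded value must be parsed back into a list (here via ast.literal_eval) instead of being kept as the raw string read from disk:

import ast

cache = {"[1, 2, 3]": [10, 4, 2, 1, 7]}
with open("demo_dictionary.data", "w") as f:
    for key, value in cache.items():
        f.write("{};{}\n".format(key, value))

loaded = {}
with open("demo_dictionary.data") as f:
    for line in f:
        key, value = line.rstrip("\n").split(sep=";")
        loaded[key] = ast.literal_eval(value)  # "[10, 4, ...]" back into a list

assert loaded == cache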
Example #15
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

#Simple RBM
SimpleRBM = RBMAlgorithm(epochs=40)
#Content
ContentKNN = ContentKNNAlgorithm()

#Combine them
Hybrid = HybridAlgorithm([SimpleRBM, ContentKNN], [0.5, 0.5])

evaluator.AddAlgorithm(SimpleRBM, "RBM")
evaluator.AddAlgorithm(ContentKNN, "ContentKNN")
evaluator.AddAlgorithm(Hybrid, "Hybrid")

# Fight!
evaluator.Evaluate(False)
Example #16
    #  'Anger': "Family Musical Comedy",
    #  'Depressing': "Drama Biography",
    #  "Confusing": 'Thriller Fantasy Crime',
    #  "Inspring": "Biography Documentary Sport War",
    # "Thrilling": "Horror Mystery"
}

for mood, c in moods.items():
    print(mood, c)
    # Load up common data set for the recommender algorithms
    (ml, evaluationData, rankings) = LoadMovieLensData(c)
    print("Searching for best parameters...")
    param_grid = {
        'n_epochs': [20, 30],
        'lr_all': [0.005, 0.010],
        'n_factors': [50, 100]
    }
    gs = GridSearchCV(SVD, param_grid, measures=['rmse', 'mae'], cv=3)
    gs.fit(evaluationData)
    evaluator = Evaluator(evaluationData, rankings,
                          list(ml.movieID_to_name.keys()))
    params = gs.best_params['rmse']
    SVDtuned = SVD(n_epochs=params['n_epochs'],
                   lr_all=params['lr_all'],
                   n_factors=params['n_factors'])
    evaluator.AddAlgorithm(SVDtuned, "SVD - Tuned")

    print("--------------------------------\n")
    filename = mood + ".sav"
    evaluator.SampleTopNRecs(ml, filename)
    print("--------------------------------\n\n")
Example #17
class Pipeline(object):
    def __init__(self, trainFilePath, valFilePath, retrievalInstance, featurizerInstance, classifierInstance):
        self.retrievalInstance = retrievalInstance
        self.featurizerInstance = featurizerInstance
        self.classifierInstance = classifierInstance
        self.evaluatorInstance = Evaluator()
        trainfile = open(trainFilePath, 'r')
        self.trainData = json.load(trainfile)
        self.trainData['questions'] = self.trainData['questions'][0:N]
        
        trainfile.close()
        valfile = open(valFilePath, 'r')
        self.valData = json.load(valfile)
        valfile.close()
        #self.question_answering()
        self.prepare_data()
        self.prepare_features()

    def makeXY(self, dataQuestions):
        X = []
        Y = []
        for question in dataQuestions:
            
            long_snippets = self.retrievalInstance.getLongSnippets(question)
            short_snippets = self.retrievalInstance.getShortSnippets(question)
            
            X.append(short_snippets)
            Y.append(question['answers'][0])
            
        return X, Y


    def get_data(self):
        dataset_type = self.trainData['origin']
        candidate_answers = self.trainData['candidates'] ##
        return self.makeXY(self.trainData['questions'])


    def prepare_data(self):
        dataset_type = self.trainData['origin']
        candidate_answers = self.trainData['candidates'] ##

        self.X_train, self.Y_train = self.makeXY(self.trainData['questions'])
        self.X_val, self.Y_val_true = self.makeXY(self.valData['questions'])

    def prepare_features(self):
        #featurization
        self.X_features_train, self.X_features_val = self.featurizerInstance.getFeatureRepresentation(self.X_train, self.X_val)

    def qa(self):
        self.clf = self.classifierInstance.buildClassifier(self.X_features_train, self.Y_train)
        #Prediction
        Y_val_pred = self.clf.predict(self.X_features_val)
        
        a = self.evaluatorInstance.getAccuracy(self.Y_val_true, Y_val_pred)
        p, r, f = self.evaluatorInstance.getPRF(self.Y_val_true, Y_val_pred)

        print("Accuracy: " + str(a))
        print("Precision: " + str(p))
        print("Recall: " + str(r))
        print("F-measure: " + str(f))
Example #18
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

#Content
ContentKNN = ContentKNNAlgorithm()

# User-based KNN
UserKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': True})

#Combine them
Hybrid = HybridAlgorithm([UserKNN, ContentKNN], [0.5, 0.5])

evaluator.AddAlgorithm(UserKNN, "User Based CF")
evaluator.AddAlgorithm(ContentKNN, "Content KNN")
evaluator.AddAlgorithm(Hybrid, "Hybrid")

# Fight!
Example #19
'''Creating NetworkGraph Object From its XML'''
networkGraph = NetworkGraph()
networkGraph.ReadFromXML(xmlnetpath, xsdnetpath)

'''Mesh generation, XML Network Graph is needed for creating XML Network Mesh.'''
meshGenerator = MeshGenerator()
meshGenerator.SetNetworkGraph(networkGraph)
networkMesh = NetworkMesh()
meshGenerator.SetNetworkMesh(networkMesh)
meshGenerator.SetMaxLength(ToleranceValue)
meshGenerator.GenerateMesh()

'''Setting Boundary Conditions Mesh input and reading XML Boundary Conditions File'''
simulationContext = SimulationContext()
simulationContext.ReadFromXML(xmlboundpath, xsdboundpath)
evaluator = Evaluator()
evaluator.SetSimulationContext(simulationContext)
simulationContext.SetEvaluator(evaluator)
boundaryConditions = BoundaryConditions()
boundaryConditions.SetSimulationContext(simulationContext)
boundaryConditions.SetNetworkMesh(networkMesh)
boundaryConditions.ReadFromXML(xmlboundpath, xsdboundpath)

'''Setting Evaluator'''
evaluator.SetNetworkGraph(networkGraph)
evaluator.SetNetworkMesh(networkMesh)
preRun = False
for el in networkMesh.Elements:
    if el.Type == 'WavePropagation' and el.nonLinear is True:
        preRun = True
        break
Example #20
    print("Loading movie ratings...")
    data = source.loadMovieLensRating()
    print("Prepare movie information...")
    source.computeMovieInformation()
    print("Creating ranking for each movie ...")
    rankings = source.getPopularityRanksByRating()
    return (source, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(dataSource, data, rankings) = LoadData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(data, rankings)

contentKNN = KNNAlgorithm()
evaluator.AddAlgorithm(contentKNN, "ContentKNN")

# Just make random recommendations
# Random = NormalPredictor()
# evaluator.AddAlgorithm(Random, "Random")

evaluator.Evaluate()

useTargetId = 85
totalMovieNeeded = 5
evaluator.GetRecomendationMovie(dataSource, useTargetId, totalMovieNeeded)
Example #21
    print("Loading movie ratings...")
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

#RBM
RBM = RBMAlgorithm(epochs=20)
evaluator.AddAlgorithm(RBM, "RBM")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

# Fight!
evaluator.Evaluate(False)

#evaluator.SampleTopNRecs(ml)
Example #22
from End import EndSuccess
from End import EndFail

import math

import rospy

CONFIDENCE_THRESHOLD = 20  # number of tennis ball pixels
DIST_THRESHOLD = 5
MAX_SPEED_AT_DIST = 10  # distance at which rover should be traveling at max speed
MAX_SPEED_AT_ANGLE = math.pi / 2  # angular distance at which rover should be turning at max speed
MIN_DRIVE_SPEED = 150
MIN_TURNING_SPEED = 180

seek_states = {
    "evaluator":
    Evaluator(CONFIDENCE_THRESHOLD, DIST_THRESHOLD, goalTracker),
    "seeker":
    Seeker(CONFIDENCE_THRESHOLD, MAX_SPEED_AT_DIST, MAX_SPEED_AT_ANGLE,
           MIN_DRIVE_SPEED, MIN_TURNING_SPEED),
    "failure":
    EndFail(),
    "success":
    EndSuccess(),
}

seek_transitions = {
    "evaluator:far": "failure",
    "evaluator:close": "seeker",
    "evaluator:lost": "success",
    "seeker:reached": "exit:waypoint",
    "seeker:lost": "failure",
Example #23
def astra(args, logger):
    """
        Self-training with weak supervision
        Leverages labeled data, unlabeled data, and weak rules for training a neural network
    """

    teacher_dev_res_list = []
    teacher_test_res_list = []
    teacher_train_res_list = []
    dev_res_list = []
    test_res_list = []
    train_res_list = []
    results = {}

    student_pred_list = []

    ev = Evaluator(args, logger=logger)

    logger.info("building student: {}".format(args.student_name))
    student = Student(args, logger=logger)

    logger.info("building teacher")
    teacher = Teacher(args, logger=logger)

    logger.info("loading data")
    dh = DataHandler(args, logger=logger, student_preprocess=student.preprocess, teacher_preprocess=teacher.preprocess)
    train_dataset = dh.load_dataset(method='train')
    train_dataset.oversample(args.oversample)  
    dev_dataset = dh.load_dataset(method='dev')
    test_dataset = dh.load_dataset(method='test')
    unlabeled_dataset = dh.load_dataset(method='unlabeled')

    logger.info("creating pseudo-dataset")
    pseudodataset = dh.create_pseudodataset(unlabeled_dataset)
    pseudodataset.downsample(args.sample_size)

    # Train Student
    newtraindataset = dh.create_pseudodataset(train_dataset)
    newtraindataset.balance('labels')
    newtraindataset.report_stats('labels')
    results['student_train'] = student.train(
        train_dataset=newtraindataset,
        dev_dataset=dev_dataset,
        train_label_name='labels',
        dev_label_name='labels',
    )
    train_res_list.append(results['student_train'])
    student.save('supervised_student')

    logger.info("\n\n\t*** Evaluating on dev data ***")
    results['supervised_student_dev'] = evaluate(student, dev_dataset, ev, "student dev")
    dev_res_list.append(results['supervised_student_dev'])

    logger.info("\n\n\t*** Evaluating on test data ***")
    results['supervised_student_test'], s_test_dict = evaluate_test(student, test_dataset, ev, "student test")
    test_res_list.append(results['supervised_student_test'])
    student_pred_list.append(s_test_dict)

    # Initialize Teacher
    logger.info("initializing teacher on unlabeled data with majority voting")
    teacher_res = teacher.train(pseudodataset)

    logger.info("evaluating majority voting")
    results['teacher_train'] = evaluate(teacher, train_dataset, ev, "teacher train")
    results['teacher_dev'] = evaluate(teacher, dev_dataset, ev, "teacher dev")
    results['teacher_test'] = evaluate(teacher, test_dataset, ev, "teacher test")
    teacher_train_res_list.append(results['teacher_train'])
    teacher_dev_res_list.append(results['teacher_dev'])
    teacher_test_res_list.append(results['teacher_test'])

    # Self-Training with Weak Supervision
    for iter in range(args.num_iter):
        logger.info("\n\n\t *** Starting loop {} ***".format(iter))

        # Create pseudo-labeled dataset
        pseudodataset.downsample(args.sample_size)

        # Add Student as extra rule in teacher.
        logger.info("Adding Student as extra rule in Teacher")
        teacher.student = student

        _ = teacher.train_ran(train_dataset=train_dataset, train_label_name='labels',
                              dev_dataset=dev_dataset, dev_label_name='labels',
                              unlabeled_dataset=pseudodataset)

        # Apply Teacher on unlabeled data
        teacher_pred_dict_unlabeled = teacher.predict_ran(dataset=pseudodataset)
        teacher_dev_res, t_dev_dict = evaluate_ran(teacher, dev_dataset, ev, "teacher dev iter{}".format(iter))
        teacher_dev_res_list.append(teacher_dev_res)

        teacher_test_res, t_test_dict = evaluate_ran(teacher, test_dataset, ev, "teacher test iter{}".format(iter))
        # analyze_rule_attention_scores(t_test_dict, logger, args.logdir, name='test_iter{}'.format(iter))
        teacher_test_res_list.append(teacher_test_res)

        # Update unlabeled data with Teacher's predictions
        pseudodataset.data['teacher_labels'] = teacher_pred_dict_unlabeled['preds']
        pseudodataset.data['teacher_proba'] = teacher_pred_dict_unlabeled['proba']
        pseudodataset.data['teacher_weights'] = np.max(teacher_pred_dict_unlabeled['proba'], axis=1)
        pseudodataset.drop(col='teacher_labels', value=-1)

        pseudodataset.balance('teacher_labels', proba='teacher_proba')
        pseudodataset.report_stats('teacher_labels')

        if len(set(teacher_pred_dict_unlabeled['preds'])) == 1:
            # Teacher predicts a single class
            logger.info("Self-training led to trivial predictions. Stopping...")
            break

        if len(pseudodataset) < 5:
            logger.info("[WARNING] Sampling led to only {} examples. Skipping iteration...".format(len(pseudodataset)))
            continue

        # Re-train student with weighted pseudo-instances
        logger.info('training student on pseudo-labeled instances provided by the teacher')
        train_res = student.train_pseudo(
            train_dataset=pseudodataset,
            dev_dataset=dev_dataset,
            train_label_name='teacher_proba' if args.soft_labels else 'teacher_labels',
            train_weight_name='teacher_weights' if args.loss_weights else None,
            dev_label_name='labels',
        )

        logger.info('fine-tuning the student on clean labeled data')
        train_res = student.finetune(
            train_dataset=newtraindataset,
            dev_dataset=dev_dataset,
            train_label_name='labels',
            dev_label_name='labels',
        )
        train_res_list.append(train_res)

        # Evaluate student performance and update records
        dev_res = evaluate(student, dev_dataset, ev, "student dev iter{}".format(iter))
        test_res, s_test_dict = evaluate_test(student, test_dataset, ev, "student test iter{}".format(iter))
        logger.info("Student Dev performance on iter {}: {}".format(iter, dev_res['perf']))
        logger.info("Student Test performance on iter {}: {}".format(iter, test_res['perf']))

        prev_max = max([x['perf'] for x in dev_res_list])
        if dev_res['perf'] > prev_max:
            logger.info("Improved dev performance from {:.2f} to {:.2f}".format(prev_max, dev_res['perf']))
            student.save("student_best")
            teacher.save("teacher_best")
        dev_res_list.append(dev_res)
        test_res_list.append(test_res)
        student_pred_list.append(s_test_dict)

    # Store Final Results
    logger.info("Final Results")
    teacher_all_dev = [x['perf'] for x in teacher_dev_res_list]
    teacher_all_test = [x['perf'] for x in teacher_test_res_list]
    teacher_perf_str = ["{}:\t{:.2f}\t{:.2f}".format(i, teacher_all_dev[i], teacher_all_test[i]) for i in np.arange(len(teacher_all_dev))]
    logger.info("TEACHER PERFORMANCES:\n{}".format("\n".join(teacher_perf_str)))

    all_dev = [x['perf'] for x in dev_res_list]
    all_test = [x['perf'] for x in test_res_list]
    perf_str = ["{}:\t{:.2f}\t{:.2f}".format(i, all_dev[i], all_test[i]) for i in np.arange(len(all_dev))]
    logger.info("STUDENT PERFORMANCES:\n{}".format("\n".join(perf_str)))

    # Get results in the best epoch (if multiple best epochs keep last one)
    best_dev_epoch = len(all_dev) - np.argmax(all_dev[::-1]) - 1
    best_test_epoch = len(all_test) - np.argmax(all_test[::-1]) - 1
    logger.info("BEST DEV {} = {:.3f} for epoch {}".format(args.metric, all_dev[best_dev_epoch], best_dev_epoch))
    logger.info("FINAL TEST {} = {:.3f} for epoch {} (max={:.2f} for epoch {})".format(args.metric,
                                                                                       all_test[best_dev_epoch], best_dev_epoch, all_test[best_test_epoch], best_test_epoch))
    results['teacher_train_iter'] = teacher_train_res_list
    results['teacher_dev_iter'] = teacher_dev_res_list
    results['teacher_test_iter'] = teacher_test_res_list

    results['student_train_iter'] = train_res_list
    results['student_dev_iter'] = dev_res_list
    results['student_test_iter'] = test_res_list

    results['student_dev'] = dev_res_list[best_dev_epoch]
    results['student_test'] = test_res_list[best_dev_epoch]
    results['teacher_dev'] = teacher_dev_res_list[best_dev_epoch]
    results['teacher_test'] = teacher_test_res_list[best_dev_epoch]
    
    # Save models and results
    student.save("student_last")
    teacher.save("teacher_last")
    save_and_report_results(args, results, logger)
    return results
Example #24
    ml = MovieLens()
    print('Loading movie ratings...')
    data = ml.loadMovieLensDataset()
    #Compute movie popularity ranks to measure novelty
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

t0 = time()
evaluator = Evaluator(evaluationData, rankings)

#Content-based
ContentKNN = ContentBasedAlgorithm()
evaluator.AddAlgorithm(ContentKNN, "ContentBased")

#User-Based
sim_options_user = {'name': 'cosine', 'user_based': True}
userKNN = KNNBasic(sim_options=sim_options_user)
evaluator.AddAlgorithm(userKNN, "UserBased")

#Item-Based
sim_options_item = {'name': 'cosine', 'user_based': False}
itemKNN = KNNBasic(sim_options=sim_options_item)
evaluator.AddAlgorithm(itemKNN, "ItemBased")
Example #25
    data = ml.loadMovieLensLatestSmall()
    print(
        "\nComputing movie popularity ranks so we can measure novelty later..."
    )
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

# User-based KNN
UserKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': True})
evaluator.AddAlgorithm(UserKNN, "User KNN")

# Item-based KNN
ItemKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': False})
evaluator.AddAlgorithm(ItemKNN, "Item KNN")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

# Fight!
evaluator.Evaluate(False)
Example #26
(ml, evaluationData, rankings) = LoadMovieLensData()

print("Searching for best parameters...")
param_grid = {'hiddenDim': [20, 10], 'learningRate': [0.1, 0.01]}
gs = GridSearchCV(RBMAlgorithm, param_grid, measures=['rmse', 'mae'], cv=3)

gs.fit(evaluationData)

# best RMSE score
print("Best RMSE score attained: ", gs.best_score['rmse'])

# combination of parameters that gave the best RMSE score
print(gs.best_params['rmse'])

# Construct an Evaluator to, you know, evaluate them
evaluator = Evaluator(evaluationData, rankings)

params = gs.best_params['rmse']
RBMtuned = RBMAlgorithm(hiddenDim=params['hiddenDim'],
                        learningRate=params['learningRate'])
evaluator.AddAlgorithm(RBMtuned, "RBM - Tuned")

RBMUntuned = RBMAlgorithm()
evaluator.AddAlgorithm(RBMUntuned, "RBM - Untuned")

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, "Random")

# Fight!
evaluator.Evaluate(False)
Example #27
logger.info('Saving model architecture')
with open(out_dir + '/model_arch.json', 'w') as arch:
    arch.write(model.to_json(indent=2))

logger.info(
    '---------------------------------------------------------------------------------------'
)

###############################################################################################################################
## Training
#

logger.info('Initial Evaluation:')
evl = Evaluator(logger,
                out_dir, (train_qn_x, train_ans_x, train_y),
                (dev_qn_x, dev_ans_x, dev_y), (test_qn_x, test_ans_x, test_y),
                model_type,
                batch_size_eval=batch_size_eval,
                print_info=True)
evl.evaluate(model, -1)

evl.print_info()

total_train_time = 0
total_eval_time = 0

for ii in range(nb_epoch):
    # Training
    train_input = [train_qn_x, train_ans_x]

    t0 = time()
    # model.fit below performs the actual neural-network training
Example #28
 def __init__(self):
     Evaluator.__init__(self)
Example #29
def runSimulation(simType, defaultNet, wdir, odir, images, xsd, net, mesh,
                  xmlout, bound, netSchema, boundSchema, template, parameters,
                  diameters, days, xmlSol, xmlMesh, writeCsv, plotImages,
                  plotPressure, plotFlow, plotWss, plotReynolds, writePressure,
                  writeFlow, writeWss, writeReynolds, velocityProfile, results,
                  excludeWss, export, automaticResults, inputGnuid):
    '''Welcome and instructions messages.'''

    print "##########################################"
    print "############ Welcome to pyNS #############"
    print "## ./pyNS -h or --help for instructions ##"
    print "##########################################\n"
    '''Exporting results into txt files'''
    if export is not False:
        if not os.path.exists('Results/%s/exportedSolutions' % export):
            os.mkdir('Results/%s/exportedSolutions' % export)
        for f in mylistdir('Results/%s/json' % export):
            if f == 'info.json':
                pass
            else:
                print "exporting Results/%s/json/" % export + f
                exporting('Results/%s/json/' % export + f)
                new_file = f.split('.')[0] + '.txt'
                shutil.move(
                    'Results/%s/json/' % export + new_file,
                    'Results/%s/exportedSolutions/' % export + new_file)
        sys.exit(
            'All %s solutions exported successfully in Results/%s/exportedSolutions/ folder'
            % (export, export))

    if not results:
        if defaultNet is True:
            simType = 'specific'
            net = 'vascular_network_arterial_right_arm.xml'
            bound = 'boundary_conditions_arterial_right_arm.xml'
        elif template == 'willis':
            simType = 'specific'
            wdir = 'XML/Models/WillisCircle'
            net = 'vascular_network_willis.xml'
            bound = 'boundary_conditions_willis.xml'
        elif simType == 'specific':
            if net is None and bound is not None:
                sys.exit(
                    "Please provide a network graph XML input file or choose a generic simulation type."
                )
            elif net is not None and bound is None:
                sys.exit(
                    "Please provide a boundary conditions XML input file or choose a generic simulation type."
                )
            elif net is None and bound is None:
                sys.exit(
                    "Please provide either a network graph XML input file and a boundary conditions XML input file or choose a generic simulation type."
                )
    '''Checking matplotlib module for optional plotting methods.'''
    if plotImages or plotFlow or plotPressure or plotWss or plotReynolds or velocityProfile is True:
        try:
            import matplotlib
        except ImportError:
            sys.exit(
                'Matplotlib package is required for plotting solutions in .png files or computing velocityProfile videos.\nPlease download matplotlib from matplotlib.sourceforge.net.'
            )
    '''Loading previous specific results.'''
    if results is not False:
        while True:
            print "Starting webServer for post-processing results. Close it with CTRL-C."
            Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
            try:
                port = 8000
                httpd = SocketServer.TCPServer(("localhost", port), Handler)
            except:
                try:
                    pid = None
                    for line in os.popen("lsof -i:8000"):
                        fields = line.split()
                        pid = fields[1]
                    if pid:
                        os.system("kill %s" % pid)
                        time.sleep(5)
                    httpd = SocketServer.TCPServer(("localhost", port),
                                                   Handler)
                except:
                    connected = False
                    startPort = 8000
                    while not connected:
                        try:
                            httpd = SocketServer.TCPServer(
                                ("localhost", startPort), Handler)
                            connected = True
                            port = startPort
                        except:
                            startPort += 1

            if results == 'last':
                ip = "http://localhost:%s" % port
                webbrowser.open_new_tab(ip + '/Results/results.html')
            else:
                if os.path.exists('Results/' + results):
                    ip = "http://localhost:%s" % port
                    webbrowser.open_new_tab(ip + "/Results/" + results +
                                            "/results.html")
                else:
                    sys.exit('Error: ' + results +
                             ' directory does not exist.')
            httpd.serve_forever()
    '''Checking for webserver instance'''
    if automaticResults:
        try:
            ip = "http://localhost:8000"
            pid = None
            for line in os.popen("lsof -i:8000"):
                fields = line.split()
                pid = fields[1]
            if pid:
                os.system("kill %s" % pid)
            Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
            httpd = SocketServer.TCPServer(("localhost", 8000), Handler)
        except:
            connected = False
            startPort = 8000
            while not connected:
                try:
                    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
                    httpd = SocketServer.TCPServer(("localhost", startPort),
                                                   Handler)
                    connected = True
                    port = startPort
                    ip = "http://localhost:%s" % port
                except:
                    startPort += 1
    '''SIMULATION'''
    '''Create XML and image directories'''
    if not os.path.exists(wdir):
        os.mkdir(wdir)
    if not os.path.exists(xsd):
        os.mkdir(xsd)
    '''If needed, creating output directory(s).'''
    if xmlSol is True or xmlMesh is True or writeFlow is True or writePressure is True or writeWss is True or writeReynolds is True:
        if not os.path.exists(odir):
            os.mkdir(odir)
    if writeFlow is True:
        ofdir = os.path.join(odir, 'Flow/')
        if not os.path.exists(ofdir):
            os.mkdir(ofdir)
    if writePressure is True:
        opdir = os.path.join(odir, 'Pressure/')
        if not os.path.exists(opdir):
            os.mkdir(opdir)
    if writeWss is True:
        owdir = os.path.join(odir, 'Wss/')
        if not os.path.exists(owdir):
            os.mkdir(owdir)
    if writeReynolds is True:
        oodir = os.path.join(odir, 'Other/')
        if not os.path.exists(oodir):
            os.mkdir(oodir)
    '''If needed, creating images directory.'''
    if plotImages is True:
        f_images = os.path.join(images, 'Flow/')
        p_images = os.path.join(images, 'Pressure/')
        w_images = os.path.join(images, 'Wss/')
        o_images = os.path.join(images, 'Other/')
        if not os.path.exists(images):
            os.mkdir(images)
            os.mkdir(f_images)
            os.mkdir(p_images)
            os.mkdir(w_images)
            os.mkdir(o_images)
    '''Setting variables.'''
    testTube = 'XML/TEST/CircularStraightTube/'
    netTube = 'vascular_network_v3.0_TUBE.xml'
    boundTube = 'boundary_conditions_v2.0_TUBE.xml'
    testTape = 'XML/TEST/CircularTaperedTube/'
    netTape = 'vascular_network_v3.0_TAPE.xml'
    boundTape = 'boundary_conditions_v2.0_TAPE.xml'
    testSimple = 'XML/TEST/SimpleNetwork/'
    netSimple = 'vascular_network_simple.xml'
    boundSimple = 'boundary_conditions_simple.xml'
    testing = 'XML/TEST/Testing/'
    testingNetwork = 'vascular_network_test.xml'
    testingBoundary = 'boundary_conditions_test.xml'

    if simType == 'specific':
        xmlnetpath = os.path.join(wdir, net)
        xmlboundpath = os.path.join(wdir, bound)
        preRun = True
    if simType == 'tube':
        xmlnetpath = os.path.join(testTube, netTube)
        xmlboundpath = os.path.join(testTube, boundTube)
        preRun = False
    if simType == 'tape':
        xmlnetpath = os.path.join(testTape, netTape)
        xmlboundpath = os.path.join(testTape, boundTape)
        preRun = False
    if simType == 'simple':
        xmlnetpath = os.path.join(testSimple, netSimple)
        xmlboundpath = os.path.join(testSimple, boundSimple)
        preRun = False
    if simType == 'testing':
        xmlnetpath = os.path.join(testing, testingNetwork)
        xmlboundpath = os.path.join(testing, testingBoundary)
        preRun = False

    xmlmeshpath = os.path.join(wdir, mesh)
    xmloutpath = os.path.join(odir, xmlout)
    xsdnetpath = os.path.join(xsd, netSchema)
    xsdboundpath = os.path.join(xsd, boundSchema)
    '''Setting adaptation and simulation days'''
    adaptation = Adaptation()
    daysList = map(int, list(linspace(-1, days, days + 2)))
    if excludeWss is True and days > 0:
        sys.exit(
            "Error: You can't exclude Wss computing for adaptation algorithm")
    '''Setting Simulation Context Parameters for Simulation'''
    simulationContext = SimulationContext()
    evaluator = Evaluator()
    evaluator.SetSimulationContext(simulationContext)
    simulationContext.SetEvaluator(evaluator)

    for day in daysList:
        if day <= 0:
            '''Parameters Model Adaptor'''
            if simType == 'generic':
                modelAdaptor = ModelAdaptor()
                modelAdaptor.SetSimulationContext(simulationContext)
                modelAdaptor.SetEvaluator(evaluator)
                modelAdaptor.ChoosingTemplate(parameters)
                if template == 'arm':
                    if day == -1:
                        modelAdaptor.ftype = 7
                    if modelAdaptor.arm == 0:
                        if modelAdaptor.ftype == 0:
                            wdir = 'XML/Models/Left_Arm/#0.Lower_RC_EE'
                            preRun = True
                        if modelAdaptor.ftype == 1:
                            wdir = 'XML/Models/Left_Arm/#1.Lower_RC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 2:
                            pass
                        if modelAdaptor.ftype == 3:
                            wdir = 'XML/Models/Left_Arm/#3.Upper_BC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 4:
                            pass
                        if modelAdaptor.ftype == 5:
                            wdir = 'XML/Models/Left_Arm/#5.Upper_BB_ES'
                            preRun = True
                        if modelAdaptor.ftype == 6:
                            pass
                        if modelAdaptor.ftype == 7:
                            wdir = 'XML/Models/Left_Arm/PRE'
                            preRun = False
                    if modelAdaptor.arm == 1:
                        if modelAdaptor.ftype == 0:
                            wdir = 'XML/Models/Right_Arm/#0.Lower_RC_EE'
                            preRun = True
                        if modelAdaptor.ftype == 1:
                            wdir = 'XML/Models/Right_Arm/#1.Lower_RC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 2:
                            pass
                        if modelAdaptor.ftype == 3:
                            wdir = 'XML/Models/Right_Arm/#3.Upper_BC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 4:
                            pass
                        if modelAdaptor.ftype == 5:
                            wdir = 'XML/Models/Right_Arm/#5.Upper_BB_ES'
                            preRun = True
                        if modelAdaptor.ftype == 6:
                            pass
                        if modelAdaptor.ftype == 7:
                            wdir = 'XML/Models/Right_Arm/PRE'
                            preRun = False

                netPostGeneric = 'vascular_network.xml'
                boundPostGeneric = 'boundary_conditions.xml'
                netPost = modelAdaptor.Idpat + '_vascular_network.xml'
                boundPost = modelAdaptor.Idpat + '_boundary_conditions.xml'
                xmlnetpathGeneric = os.path.join(wdir, netPostGeneric)
                xmlboundpathGeneric = os.path.join(wdir, boundPostGeneric)
                xmlnetpath = os.path.join(wdir, netPost)
                xmlboundpath = os.path.join(wdir, boundPost)
                simulationContext.ReadFromXML(xmlboundpathGeneric,
                                              xsdboundpath)
            else:
                simulationContext.ReadFromXML(xmlboundpath, xsdboundpath)

            if simType == 'generic':
                modelAdaptor.SettingParameters(parameters)
                modelAdaptor.AdaptingParameters(xmlboundpathGeneric,
                                                xmlboundpath)
            '''Creating NetworkGraph Object From its XML'''
            networkGraph = NetworkGraph()
            if simType == 'generic':
                networkGraph.ReadFromXML(xmlnetpathGeneric, xsdnetpath)
            else:
                networkGraph.ReadFromXML(xmlnetpath, xsdnetpath)
            '''NetworkGraph Model Adaptor'''
            if simType == 'generic':
                modelAdaptor.SetNetworkGraph(networkGraph)
                evaluator.SetNetworkGraph(networkGraph)
                if diameters is False:
                    csvfilepath = modelAdaptor.AdaptingModel(
                        xmlnetpathGeneric, xmlnetpath)
                else:
                    csvfilepath = modelAdaptor.AdaptingModel(
                        xmlnetpathGeneric, xmlnetpath, diameters)
            '''Setting results directory based on PatientID in networkGraph XML file'''

            if plotImages is False:
                try:
                    shutil.rmtree('Results/json')
                except:
                    pass
                try:
                    os.mkdir('Results/json')
                except:
                    pass
                if simType == 'generic':
                    idPat = modelAdaptor.Idpat
                elif template == 'willis':
                    idPat = template
                else:
                    idPat = simType
                if not os.path.exists('Results/%s' % idPat):
                    os.mkdir('Results/%s' % idPat)
                    os.mkdir('Results/%s/json' % idPat)
                    shutil.copytree('Results/css', 'Results/%s/css' % idPat)
                    shutil.copytree('Results/js', 'Results/%s/js' % idPat)
                    shutil.copy('Results/results.html',
                                'Results/%s/results.html' % idPat)
            '''Mesh generation, XML Network Graph is needed for creating XML Network Mesh.'''
            meshGenerator = MeshGenerator()
            meshGenerator.SetNetworkGraph(networkGraph)
            networkMesh = NetworkMesh()
            meshGenerator.SetNetworkMesh(networkMesh)
            meshGenerator.SetMaxLength(5.0e-2)
            meshGenerator.GenerateMesh()
        '''Setting Boundary Conditions Mesh input and reading XML Boundary Conditions File'''
        boundaryConditions = BoundaryConditions()
        boundaryConditions.SetSimulationContext(simulationContext)
        boundaryConditions.SetNetworkMesh(networkMesh)
        boundaryConditions.ReadFromXML(xmlboundpath, xsdboundpath)
        boundaryConditions.SetSpecificCardiacOutput()
        '''In case of a generic simulation, patient-specific generated files will be moved to Results folder.'''
        if simType == 'generic' and day < 0:
            shutil.move(os.path.abspath(xmlnetpath),
                        ('Results/%s/%s_pre_vascular_network.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(xmlboundpath),
                        ('Results/%s/%s_pre_boundary_conditions.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(csvfilepath),
                        ('Results/%s/%s_pre_patient_specific.csv' %
                         (idPat, idPat)))
        if simType == 'generic' and day == 0:
            shutil.copy(os.path.abspath(xmlnetpath),
                        ('Results/%s/%s_post_vascular_network.xml' %
                         (idPat, idPat)))
            shutil.copy(os.path.abspath(xmlboundpath),
                        ('Results/%s/%s_post_boundary_conditions.xml' %
                         (idPat, idPat)))
            shutil.copy(os.path.abspath(csvfilepath),
                        ('Results/%s/%s_post_patient_specific.csv' %
                         (idPat, idPat)))
        if simType == 'generic' and day > 0 and day == days:
            shutil.move(os.path.abspath(xmlnetpath),
                        ('Results/%s/%s_adapted_vascular_network.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(xmlboundpath),
                        ('Results/%s/%s_adapted_boundary_conditions.xml' %
                         (idPat, idPat)))
            shutil.move(os.path.abspath(csvfilepath),
                        ('Results/%s/%s_adapted_patient_specific.csv' %
                         (idPat, idPat)))
        '''Setting Evaluator'''
        evaluator.SetNetworkGraph(networkGraph)
        evaluator.SetNetworkMesh(networkMesh)
        '''Adaptation Model'''
        adaptation.SetBoundaryConditions(boundaryConditions)
        adaptation.SetSimulationContext(simulationContext)
        preRun = adaptation.Adapt(day)
        if len(daysList) > 1:
            print "Day %d " % (day * 10)  # one step represents 10 days
        ''' Setting Solver Class'''
        solver = SolverFirstTrapezoid()
        solver.SetNetworkMesh(networkMesh)
        solver.SetBoundaryConditions(boundaryConditions)
        solver.SetSimulationContext(simulationContext)
        solver.SetEvaluator(evaluator)
        '''Pre-run'''
        if preRun is True:
            solver.SetSteadyFlow()
            print "Steady Pre-Run, setting non-linear parameters"
            solver.Solve()
            parametersToLinear = ["Radius", "Compliance"]
            for el in networkMesh.Elements:
                el.SetLinearValues(parametersToLinear)
            networkMesh.checkLinearConsistence()
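            # NOTE (interpretation): the steady pre-run computes an operating point so that
            # Radius and Compliance can be frozen to linear values for the pulsatile run below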
        '''Run'''
        evaluator.ExpressionCache = {}
        solver = SolverFirstTrapezoid()
        solver.SetNetworkMesh(networkMesh)
        solver.SetBoundaryConditions(boundaryConditions)
        solver.SetSimulationContext(simulationContext)
        solver.SetEvaluator(evaluator)
        solver.SetPulseFlow()
        print "Solving system"
        solver.Solve()
        '''Post Processing: Setting Solutions input and plotting some information and/or writing solutions to XML Solutions File'''
        '''User can choose two different post processing strategies. Saving images using matplotlib or visualize results in its browser'''
        '''If needed, pyNS writes xml mesh file'''
        if xmlMesh is True:
            meshdirpath = os.path.join(odir, str(day))
            if not os.path.exists(meshdirpath):
                os.mkdir(meshdirpath)
            xmlmeshpath = os.path.join(meshdirpath, mesh)
            outdirpath = os.path.join(odir, str(day))
            if not os.path.exists(outdirpath):
                os.mkdir(outdirpath)
            xmloutpath = os.path.join(outdirpath, xmlout)
            networkMesh.WriteToXML(xmlmeshpath)
        '''Setting NetworkSolutions'''
        print "->100%, Running post-processing"
        networkSolutions = NetworkSolutions()
        networkSolutions.SetNetworkMesh(networkMesh)
        networkSolutions.SetNetworkGraph(networkGraph)
        networkSolutions.SetSimulationContext(simulationContext)
        networkSolutions.SetSolutions(solver.Solutions)
        networkSolutions.WriteJsonInfo(days, networkMesh.Elements, idPat)
        adaptation.SetSolutions(day, networkSolutions)
        adaptation.SetRefValues(day, networkMesh)
        '''If needed, pyNS creates images subdirectory(s) for each adaptation step.'''
        if plotImages is True:
            daystr = str(day) + '/'
            f_dayImages = os.path.join(f_images, daystr)
            p_dayImages = os.path.join(p_images, daystr)
            w_dayImages = os.path.join(w_images, daystr)
            o_dayImages = os.path.join(o_images, daystr)
            if not os.path.exists(images):
                os.mkdir(images)
            if not os.path.exists(f_dayImages):
                os.mkdir(f_dayImages)
            if not os.path.exists(p_dayImages):
                os.mkdir(p_dayImages)
            if not os.path.exists(w_dayImages):
                os.mkdir(w_dayImages)
            if not os.path.exists(o_dayImages):
                os.mkdir(o_dayImages)
            networkSolutions.SetImagesPath({
                'im': images,
                'f': f_dayImages,
                'p': p_dayImages,
                'w': w_dayImages,
                'o': o_dayImages
            })
        '''If needed, pyNS creates output subdirectory(s) for each adaptation step.'''
        if writeFlow is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            f_dayOutput = os.path.join(ofdir, daystr)
            if not os.path.exists(f_dayOutput):
                os.mkdir(f_dayOutput)
        if writePressure is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            p_dayOutput = os.path.join(opdir, daystr)
            if not os.path.exists(p_dayOutput):
                os.mkdir(p_dayOutput)
        if writeWss is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            w_dayOutput = os.path.join(owdir, daystr)
            if not os.path.exists(w_dayOutput):
                os.mkdir(w_dayOutput)
        if writeReynolds is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day) + '/'
            o_dayOutput = os.path.join(oodir, daystr)
            if not os.path.exists(o_dayOutput):
                os.mkdir(o_dayOutput)
        '''If needed, pyNS writes xml Solution file.'''
        if xmlSol is True:
            networkSolutions.WriteToXML(xmloutpath)
        '''Post process solution for each element of the network'''
        for element in networkMesh.Elements:
            if element.Type == 'WavePropagation' or element.Type == 'Resistance':
                networkSolutions.WriteJson(element.Id, day, excludeWss, idPat)
                if velocityProfile is True:
                    networkSolutions.SaveVelocityProfile(element, str(day))
                if plotFlow is True:
                    networkSolutions.PlotFlow(element.Id)
                if plotPressure is True:
                    networkSolutions.PlotPressure(element.Id)
                if plotWss is True:
                    networkSolutions.PlotWSS(element)
                if plotReynolds is True:
                    networkSolutions.PlotReynolds(element.Id)
                if writeFlow is True:
                    networkSolutions.WriteFlowOutput(
                        element.Id,
                        f_dayOutput + 'Flow_' + element.Name + '.txt')
                if writePressure is True:
                    networkSolutions.WritePressureInput(
                        element.Id,
                        p_dayOutput + '/p_in_' + element.Name + '.txt')
                    networkSolutions.WritePressureOutput(
                        element.Id,
                        p_dayOutput + '/p_out_' + element.Name + '.txt')
                    networkSolutions.WritePressureDrop(
                        element.Id,
                        p_dayOutput + '/p_drop_' + element.Name + '.txt')
                if writeWss is True:
                    networkSolutions.WriteWSSOutput(
                        element.Id,
                        w_dayOutput + 'WSS_' + element.Name + '.txt')
                if writeReynolds is True:
                    networkSolutions.WriteReynolds(
                        element.Id,
                        o_dayOutput + 'Reynolds' + element.Name + '.txt')
    '''Adaptation data'''
    if days > 0:
        networkSolutions.WriteJsonAdapt(adaptation, idPat)
        if writeCsv is True:
            networkSolutions.WriteToCsv(adaptation, 'Diameter')
            networkSolutions.WriteToCsv(adaptation, 'Pressure')
            networkSolutions.WriteToCsv(adaptation, 'Flow')
            networkSolutions.WriteToCsv(adaptation, 'Wss')
    '''Export GNUID'''
    if inputGnuid:
        networkSolutions.GetGnuidInformation(idPat, inputGnuid)

    print "\nJOB FINISHED"
    if automaticResults:
        try:
            shutil.copytree('Results/%s/json' % idPat,
                            'Results/json',
                            symlinks=True)
        except OSError:
            shutil.rmtree('Results/json')
            shutil.copytree('Results/%s/json' % idPat,
                            'Results/json',
                            symlinks=True)
        print "Starting webServer for post-processing results. Close it with CTRL-C."
        webbrowser.open_new_tab(ip + '/Results/results.html')
        httpd.serve_forever()
Example No. 30
0
def LoadMovieLensData():
    ml = MovieLens()
    print(BeginGREEN + "Loading movie ratings..." + EndGREEN)
    data = ml.loadMovieLensLatestSmall()
    print(
        BeginGREEN +
        "Computing movie popularity ranks so we can measure novelty later..." +
        EndGREEN)
    rankings = ml.getPopularityRanks()
    return (data, rankings)


np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator
evaluator = Evaluator(evaluationData, rankings)

# Throw in an SVD recommender
SVDAlgorithm = SVD(random_state=10)
evaluator.AddAlgorithm(SVDAlgorithm, BeginBgBLUE + "SVD" + EndBgBLUE)

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, BeginBgBLUE + "Random" + EndBgBLUE)

# Fight!
evaluator.Evaluate(True)
Example No. 31
0
def evaluator(name=None):
    from Evaluator import Evaluator
    return Evaluator(name)
Example No. 32
0
def train_model(model, optim, train_q_embed, dev_q_embed, dev_q_cand_ids,
                train_pairs, dev_pairs, hparams, log_path, seed):
    """Train model using negative sampling.

    Args:

    - model
    - optim: optimizer
    - train_q_embed: Embedding object for training queries, shape (nb
      train queries, dim)
    - dev_q_embed: Embedding object for dev queries, shape (nb dev
      queries, dim)
    - dev_q_cand_ids: list containing candidate ID of each dev query
      (None if it is not a candidate), used to compute MAP on dev set.
    - train_pairs: array of (query ID, hypernym ID) pairs
      for training
    - dev_pairs: array of (query ID, hypernym ID) pairs for
      validation
    - hparams: dict containing settings of hyperparameters
    - log_path: path of log file
    - seed: seed for RNG

    """

    # Extract hyperparameter settings
    nb_neg_samples = hparams["nb_neg_samples"]
    subsample = hparams["subsample"]
    max_epochs = hparams["max_epochs"]
    patience = hparams["patience"]
    batch_size = hparams["batch_size"]
    clip = hparams["clip"]

    if seed:
        random.seed(seed)
        np.random.seed(seed)

    # Prepare sampling of negative examples
    candidate_ids = list(range(model.get_nb_candidates()))
    cand_sampler = make_sampler(candidate_ids)

    # Prepare subsampling of positive examples
    pos_sample_prob = {}
    if subsample:
        hyp_fd = {}
        for h_id in train_pairs[:, 1]:
            if h_id not in hyp_fd:
                hyp_fd[h_id] = 0
            hyp_fd[h_id] += 1
        min_freq = min(hyp_fd.values())
        for (h_id, freq) in hyp_fd.items():
            pos_sample_prob[h_id] = sqrt(min_freq / freq)
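        # e.g. a hypernym four times as frequent as the rarest one is kept
        # with probability sqrt(1/4) = 0.5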

    # Check if we're using CUDA
    if model.use_cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # Initialize training batch for query IDs, positive hypernym IDs,
    # negative hypernym IDs, positive targets, and negative targets.
    # We separate positive and negative examples to compute the losses
    # separately. Note that this is a bit inefficient, as we compute
    # the query projections twice.
    batch_q = np.zeros(batch_size, 'int64')
    batch_h_pos = np.zeros((batch_size, 1), 'int64')
    batch_h_neg = np.zeros((batch_size, nb_neg_samples), 'int64')
    t_pos_var = torch.ones((batch_size, 1), requires_grad=False, device=device)
    t_neg_var = torch.zeros((batch_size, nb_neg_samples),
                            requires_grad=False,
                            device=device)

    # Prepare list of sets of gold hypernym IDs for queries in
    # training set. This is used for negative sampling.
    nb_train_queries = train_q_embed.weight.shape[0]
    train_gold_ids = [set() for _ in range(nb_train_queries)]
    nb_train_pairs = train_pairs.shape[0]
    for i in range(nb_train_pairs):
        q_id = int(train_pairs[i, 0])
        h_id = int(train_pairs[i, 1])
        train_gold_ids[q_id].add(h_id)

    # Prepare list of sets of gold hypernym IDs for queries in dev set
    # to compute score (MAP)
    nb_dev_queries = dev_q_embed.weight.shape[0]
    dev_gold_ids = [set() for _ in range(nb_dev_queries)]
    nb_dev_pairs = dev_pairs.shape[0]
    for i in range(nb_dev_pairs):
        q_id = int(dev_pairs[i, 0])
        h_id = int(dev_pairs[i, 1])
        dev_gold_ids[q_id].add(h_id)

    # Prepare input variables to compute loss on dev set
    dev_q_ids = torch.tensor(dev_pairs[:, 0], dtype=torch.int64, device=device)
    dev_q_var = dev_q_embed(dev_q_ids)
    dev_h_var = torch.tensor(dev_pairs[:, 1],
                             dtype=torch.int64,
                             requires_grad=False,
                             device=device).unsqueeze(1)
    dev_t_var = torch.ones((nb_dev_pairs, 1),
                           dtype=torch.float32,
                           requires_grad=False,
                           device=device)

    # Make Evaluator to compute MAP on dev set
    dev_eval = Evaluator(model, dev_q_embed, dev_q_cand_ids)

    print("\nEvaluating untrained model on dev set...")
    MAP = dev_eval.get_MAP(dev_gold_ids)
    print("MAP: {:.4f}".format(MAP))

    checkpoint_header = [
        "Epoch", "Updates", "PosLoss", "NegLoss", "DevLoss", "DevMAP",
        "TimeElapsed"
    ]
    with open(log_path, "w") as f:
        f.write("\t".join(checkpoint_header) + "\n")

    # Train model
    best_model = deepcopy(model)
    best_score = float("-inf")
    nb_no_gain = 0
    batch_row_id = 0
    done = False
    start_time = time.time()
    print("\nStarting training...\n")
    print("\t".join(checkpoint_header))
    for epoch in range(1, max_epochs + 1):
        model.train()
        np.random.shuffle(train_pairs)
        total_pos_loss = 0.0
        total_neg_loss = 0.0

        # Loop through training pairs
        nb_updates = 0
        for pair_ix in range(train_pairs.shape[0]):
            q_id = train_pairs[pair_ix, 0]
            h_id = train_pairs[pair_ix, 1]
            if subsample and random.random() >= pos_sample_prob[h_id]:
                continue
            batch_q[batch_row_id] = q_id
            batch_h_pos[batch_row_id] = h_id

            # Get negative examples
            neg_samples = []
            while len(neg_samples) < nb_neg_samples:
                cand_id = next(cand_sampler)
                if cand_id not in train_gold_ids[q_id]:
                    neg_samples.append(cand_id)
            batch_h_neg[batch_row_id] = neg_samples

            # Update on batch
            batch_row_id = (batch_row_id + 1) % batch_size
            if batch_row_id == 0:  # wrapped around: all batch_size rows are freshly filled
                q_ids = torch.tensor(batch_q,
                                     dtype=torch.int64,
                                     requires_grad=False,
                                     device=device)
                q_var = train_q_embed(q_ids)
                h_pos_var = torch.tensor(batch_h_pos,
                                         dtype=torch.int64,
                                         requires_grad=False,
                                         device=device)
                h_neg_var = torch.tensor(batch_h_neg,
                                         dtype=torch.int64,
                                         requires_grad=False,
                                         device=device)
                optim.zero_grad()
                pos_loss = model.get_loss(q_var, h_pos_var, t_pos_var)
                neg_loss = model.get_loss(q_var, h_neg_var, t_neg_var)
                loss = pos_loss + neg_loss
                loss.backward()
                if clip > 0:
                    torch.nn.utils.clip_grad_norm_(train_q_embed.parameters(),
                                                   clip)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
                optim.step()
                total_pos_loss += pos_loss.item()
                total_neg_loss += neg_loss.item()
                nb_updates += 1

        # Check progress
        avg_pos_loss = total_pos_loss / (nb_updates * batch_size)
        avg_neg_loss = total_neg_loss / (nb_updates * batch_size)

        # Compute loss and MAP on dev set
        model.eval()
        dev_loss = model.get_loss(dev_q_var, dev_h_var, dev_t_var)
        avg_dev_loss = dev_loss.item() / nb_dev_pairs
        MAP = dev_eval.get_MAP(dev_gold_ids)
        checkpoint_data = []
        checkpoint_data.append(str(epoch))
        checkpoint_data.append(str(nb_updates))
        checkpoint_data.append("{:.4f}".format(avg_pos_loss))
        checkpoint_data.append("{:.4f}".format(avg_neg_loss))
        checkpoint_data.append("{:.4f}".format(avg_dev_loss))
        checkpoint_data.append("{:.4f}".format(MAP))
        checkpoint_data.append("{:.1f}s".format(time.time() - start_time))
        print("\t".join(checkpoint_data))
        with open(log_path, "a") as f:
            f.write("\t".join(checkpoint_data) + "\n")

        # Early stopping
        if MAP > best_score:
            best_score = MAP
            best_model = deepcopy(model)
            nb_no_gain = 0
        else:
            nb_no_gain += 1
        if nb_no_gain >= patience:
            print("EARLY STOP!")
            done = True
            print("\nEvaluating best model on dev set...")
            dev_eval.set_model(best_model)
            MAP = dev_eval.get_MAP(dev_gold_ids)
            print("MAP of best model: {:.3f}".format(MAP))
        if done:
            break
    print("\nTraining finished after {} epochs".format(epoch))
    return best_model
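
A hypothetical invocation sketch for train_model: `model`, the query Embedding objects, the ID and pair arrays, and every value below are assumptions, with the hyperparameter keys taken from the function body above.

# Hypothetical usage sketch -- all objects and values are placeholders.
hparams = {
    "nb_neg_samples": 10,  # negatives drawn per positive pair
    "subsample": True,     # frequency-based subsampling of positive pairs
    "max_epochs": 100,
    "patience": 10,        # epochs without dev-MAP gain before early stopping
    "batch_size": 32,
    "clip": 5.0,           # max gradient norm (0 disables clipping)
}
optim = torch.optim.Adam(list(model.parameters()) + list(train_q_embed.parameters()))
best = train_model(model, optim, train_q_embed, dev_q_embed, dev_q_cand_ids,
                   train_pairs, dev_pairs, hparams, "train.log", seed=42)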
Example No. 33
0
 def __init__(self, listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag = False):
     
     Evaluator.__init__(self, listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag)
Example No. 34
0
            heuristicName = sys.argv[4].strip()
            print 
        else:
            raise Exception('Invalid Input! Usage: >> python main.py <domainfile> <problemfile> -h <heuristic_name>')
    except:
        heuristicName = 'equality'
        print bcolors.OKGREEN + "--> Default heuristic 'equality'" + bcolors.ENDC

    # parse SAS/PDDL data #
    listOfPredicates, initialState, goalState, listOfActions, compliantConditions, goalCompliantConditions = grounded_data.returnParsedData()

    # generate transformation #
    Mrref, M = compute_transform(listOfPredicates, listOfActions, goalCompliantConditions, debug_flag)

    # evaluate #
    evaluation_object = Evaluator(listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag)
    print bcolors.HEADER + "\n>> Initial state evaluation = " + bcolors.OKBLUE + str(float(evaluation_object.evaluate(initialState, heuristicName))) + bcolors.ENDC    
    sys.exit(0)
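    # NOTE: sys.exit(0) above terminates the script, so the planner code below is unreachable as written.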
    
    # solve #
    plan_object = Planner(listOfPredicates, listOfActions, initialState, goalState, compliantConditions, goalCompliantConditions, Mrref, M, cost_flag)
    plan, cost  = plan_object.aStarSearch(heuristicName)

    if plan:    
        print bcolors.HEADER + "\n>> FINAL PLAN\n--> " + bcolors.OKBLUE + '\n--> '.join(plan) + "\n" + bcolors.OKGREEN + "\nCost of Plan: " + str(cost) + '\n' + bcolors.ENDC
    else:
        if cost == 0.0:
            print bcolors.HEADER + "*** NO PLAN REQUIRED ***" + bcolors.ENDC
        else:
            print bcolors.HEADER + "*** NO PLAN FOUND ***" + bcolors.ENDC    
Example No. 35
0
File: pyNS.py  Project: archTk/pyNS
def runSimulation(simType, defaultNet, wdir, odir, images, xsd, net, mesh, xmlout, bound, netSchema, boundSchema, template, parameters, diameters, days, xmlSol, xmlMesh, writeCsv, plotImages, plotPressure, plotFlow, plotWss, plotReynolds, writePressure, writeFlow, writeWss, writeReynolds, velocityProfile, results, excludeWss, export, automaticResults, inputGnuid):
    
    '''Welcome and instructions messages.'''
    
    print "##########################################"
    print "############ Welcome to pyNS #############"
    print "## ./pyNS -h or --help for instructions ##"
    print "##########################################\n"
            
    '''Exporting results into txt files'''   
    if export is not False:
        if not os.path.exists ('Results/%s/exportedSolutions' % export):
            os.mkdir('Results/%s/exportedSolutions' % export)
        for f in mylistdir('Results/%s/json' % export):
            if f == 'info.json':
                pass
            else:
                print "exporting Results/%s/json/" % export + f
                exporting('Results/%s/json/' % export + f)
                new_file = f.split('.')[0]+'.txt'
                shutil.move('Results/%s/json/' % export + new_file, 'Results/%s/exportedSolutions/' % export + new_file)
        sys.exit('All %s solutions exported successfully in Results/%s/exportedSolutions/ folder' % (export,export))
    
    if not results:
        if defaultNet is True:
            simType = 'specific'
            net = 'vascular_network_arterial_right_arm.xml'
            bound = 'boundary_conditions_arterial_right_arm.xml'
        elif template == 'willis':
            simType = 'specific'
            wdir = 'XML/Models/WillisCircle'
            net = 'vascular_network_willis.xml'
            bound = 'boundary_conditions_willis.xml'
        elif simType == 'specific':
            if net is None and bound is not None:
                sys.exit("Please provide a network graph XML input file or choose a generic simulation type.")
            elif net is not None and bound is None:
                sys.exit("Please provide a boundary conditions XML input file or choose a generic simulation type.")
            elif net is None and bound is None:
                sys.exit("Please provide either a network graph XML input file and a boundary conditions XML input file or choose a generic simulation type.")
    
    '''Checking matplotlib module for optional plotting methods.'''
    if plotImages or plotFlow or plotPressure or plotWss or plotReynolds or velocityProfile:
        try:
            import matplotlib
        except ImportError:
            sys.exit('Matplotlib package is required for plotting solutions in .png files or computing velocityProfile videos.\nPlease download matplotlib from matplotlib.sourceforge.net.')
            
    '''Loading previous specific results.'''
    if results is not False:
        while True:
            print "Starting webServer for post-processing results. Close it with CTRL-C."
            Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
            try:
                port = 8000
                httpd = SocketServer.TCPServer(("localhost", port), Handler)
            except:
                try:
                    pid = None
                    for line in os.popen("lsof -i:8000"):
                        fields = line.split()
                        pid = fields[1]
                    if pid:
                        os.system("kill %s" %pid)
                        time.sleep(5)
                    httpd = SocketServer.TCPServer(("localhost", port), Handler)
                except:
                    connected = False
                    startPort = 8000
                    while not connected:
                        try:
                            httpd = SocketServer.TCPServer(("localhost", startPort), Handler)
                            connected = True
                            port = startPort
                        except:
                            startPort+=1
                    
            if results == 'last':
                ip = "http://localhost:%s" %port
                webbrowser.open_new_tab(ip+'/Results/results.html')
            else:
                if os.path.exists('Results/'+results):
                    ip = "http://localhost:%s" %port
                    webbrowser.open_new_tab(ip+"/Results/"+results+"/results.html")
                else:
                    sys.exit('Error: '+results+' directory does not exist.')
            httpd.serve_forever()
        
    '''Checking for webserver instance'''
    if automaticResults:
        try:
            ip = "http://localhost:8000"
            pid = None
            for line in os.popen("lsof -i:8000"):
                fields = line.split()
                pid = fields[1]
            if pid:
                os.system("kill %s" %pid)
            Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
            httpd = SocketServer.TCPServer(("localhost", 8000), Handler)
        except:
            connected = False
            startPort = 8000
            while not connected:
                try:
                    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
                    httpd = SocketServer.TCPServer(("localhost", startPort), Handler)
                    connected = True
                    port = startPort
                    ip = "http://localhost:%s" %port
                except:
                    startPort+=1
    
    '''SIMULATION'''
    
    '''Create XML and image directories'''
    if not os.path.exists (wdir):
        os.mkdir(wdir)
    if not os.path.exists (xsd):
        os.mkdir(xsd)

    '''If needed, creating output directory(s).'''
    if xmlSol is True or xmlMesh is True or writeFlow is True or writePressure is True or  writeWss is True or writeReynolds is True:
        if not os.path.exists (odir):
            os.mkdir(odir)
    if writeFlow is True:
        ofdir = os.path.join(odir, 'Flow/')
        if not os.path.exists (ofdir):
            os.mkdir(ofdir)
    if writePressure is True:
        opdir = os.path.join(odir, 'Pressure/')
        if not os.path.exists (opdir):
            os.mkdir(opdir)
    if writeWss is True:
        owdir = os.path.join(odir, 'Wss/')
        if not os.path.exists (owdir):
            os.mkdir(owdir)
    if writeReynolds is True:
        oodir = os.path.join(odir, 'Other/')
        if not os.path.exists (oodir):
            os.mkdir(oodir)

    '''If needed, creating images directory.'''
    if plotImages is True:
        f_images = os.path.join(images, 'Flow/')
        p_images = os.path.join(images, 'Pressure/')
        w_images = os.path.join(images, 'Wss/')
        o_images = os.path.join(images, 'Other/')
        if not os.path.exists (images):
            os.mkdir(images)
            os.mkdir(f_images)
            os.mkdir(p_images)
            os.mkdir(w_images)
            os.mkdir(o_images)

    '''Setting variables.'''
    testTube = 'XML/TEST/CircularStraightTube/'
    netTube = 'vascular_network_v3.0_TUBE.xml'
    boundTube = 'boundary_conditions_v2.0_TUBE.xml'
    testTape = 'XML/TEST/CircularTaperedTube/'
    netTape = 'vascular_network_v3.0_TAPE.xml'
    boundTape = 'boundary_conditions_v2.0_TAPE.xml'
    testSimple = 'XML/TEST/SimpleNetwork/'
    netSimple = 'vascular_network_simple.xml'
    boundSimple = 'boundary_conditions_simple.xml'
    testing = 'XML/TEST/Testing/'
    testingNetwork = 'vascular_network_test.xml'
    testingBoundary = 'boundary_conditions_test.xml'

    if simType == 'specific':
        xmlnetpath = os.path.join(wdir, net)
        xmlboundpath = os.path.join(wdir, bound)
        preRun = True
    if simType == 'tube':
        xmlnetpath = os.path.join(testTube,netTube)
        xmlboundpath = os.path.join(testTube, boundTube)
        preRun = False
    if simType == 'tape':
        xmlnetpath = os.path.join(testTape,netTape)
        xmlboundpath = os.path.join(testTape, boundTape)
        preRun = False
    if simType == 'simple':
        xmlnetpath = os.path.join(testSimple,netSimple)
        xmlboundpath = os.path.join(testSimple, boundSimple)
        preRun = False
    if simType == 'testing':
        xmlnetpath = os.path.join(testing,testingNetwork)
        xmlboundpath = os.path.join(testing, testingBoundary)
        preRun = False
  
    xmlmeshpath = os.path.join(wdir, mesh)
    xmloutpath = os.path.join(odir, xmlout)
    xsdnetpath = os.path.join(xsd, netSchema)
    xsdboundpath = os.path.join(xsd, boundSchema)

    '''Setting adaptation and simulation days'''
    adaptation = Adaptation()
    daysList = map(int,list(linspace(-1,days,days+2)))
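    # daysList spans [-1, 0, ..., days]; e.g. days=2 gives [-1, 0, 1, 2] (day -1 is the pre-run step)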
    if excludeWss is True and days > 0:
        sys.exit("Error: You can't exclude Wss computing for adaptation algorithm")
 
    '''Setting Simulation Context Parameters for Simulation'''
    simulationContext = SimulationContext()
    evaluator = Evaluator()
    evaluator.SetSimulationContext(simulationContext)
    simulationContext.SetEvaluator(evaluator)

    for day in daysList:
        if day <= 0:
            '''Parameters Model Adaptor'''
            if simType == 'generic':
                modelAdaptor = ModelAdaptor()
                modelAdaptor.SetSimulationContext(simulationContext)
                modelAdaptor.SetEvaluator(evaluator)
                modelAdaptor.ChoosingTemplate(parameters)
                if template == 'arm':
                    if day == -1:
                        modelAdaptor.ftype = 7
                    if modelAdaptor.arm == 0:
                        if modelAdaptor.ftype == 0:
                            wdir = 'XML/Models/Left_Arm/#0.Lower_RC_EE'
                            preRun = True
                        if modelAdaptor.ftype == 1:
                            wdir = 'XML/Models/Left_Arm/#1.Lower_RC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 2:
                            pass
                        if modelAdaptor.ftype == 3:
                            wdir = 'XML/Models/Left_Arm/#3.Upper_BC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 4:
                            pass
                        if modelAdaptor.ftype == 5:
                            wdir = 'XML/Models/Left_Arm/#5.Upper_BB_ES'
                            preRun = True
                        if modelAdaptor.ftype == 6:
                            pass
                        if modelAdaptor.ftype == 7:
                            wdir = 'XML/Models/Left_Arm/PRE'
                            preRun = False
                    if modelAdaptor.arm == 1:
                        if modelAdaptor.ftype == 0:
                            wdir = 'XML/Models/Right_Arm/#0.Lower_RC_EE'
                            preRun = True
                        if modelAdaptor.ftype == 1:
                            wdir = 'XML/Models/Right_Arm/#1.Lower_RC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 2:
                            pass
                        if modelAdaptor.ftype == 3:
                            wdir = 'XML/Models/Right_Arm/#3.Upper_BC_ES'
                            preRun = True
                        if modelAdaptor.ftype == 4:
                            pass
                        if modelAdaptor.ftype == 5:
                            wdir = 'XML/Models/Right_Arm/#5.Upper_BB_ES'
                            preRun = True
                        if modelAdaptor.ftype == 6:
                            pass
                        if modelAdaptor.ftype == 7:
                            wdir = 'XML/Models/Right_Arm/PRE'
                            preRun = False
                    
                netPostGeneric = 'vascular_network.xml'
                boundPostGeneric = 'boundary_conditions.xml'
                netPost = modelAdaptor.Idpat+'_vascular_network.xml'
                boundPost = modelAdaptor.Idpat+'_boundary_conditions.xml'
                xmlnetpathGeneric = os.path.join(wdir, netPostGeneric)
                xmlboundpathGeneric = os.path.join(wdir, boundPostGeneric)
                xmlnetpath = os.path.join(wdir, netPost)
                xmlboundpath = os.path.join(wdir, boundPost)
                simulationContext.ReadFromXML(xmlboundpathGeneric, xsdboundpath)
            else:  
                simulationContext.ReadFromXML(xmlboundpath, xsdboundpath)
            
            if simType == 'generic':  
                modelAdaptor.SettingParameters(parameters)
                modelAdaptor.AdaptingParameters(xmlboundpathGeneric,xmlboundpath)
            
            '''Creating NetworkGraph Object From its XML'''
            networkGraph = NetworkGraph()
            if simType == 'generic':
                networkGraph.ReadFromXML(xmlnetpathGeneric, xsdnetpath)
            else:
                networkGraph.ReadFromXML(xmlnetpath, xsdnetpath)
            
            '''NetworkGraph Model Adaptor'''
            if simType == 'generic':
                modelAdaptor.SetNetworkGraph(networkGraph)
                evaluator.SetNetworkGraph(networkGraph)
                if diameters is False:
                    csvfilepath = modelAdaptor.AdaptingModel(xmlnetpathGeneric,xmlnetpath)
                else:
                    csvfilepath = modelAdaptor.AdaptingModel(xmlnetpathGeneric,xmlnetpath,diameters)   
                       
            '''Setting results directory based on PatientID in networkGraph XML file'''
            
            if plotImages is False:
                try:
                    shutil.rmtree('Results/json')
                except:
                    pass
                try:
                    os.mkdir('Results/json')
                except:
                    pass
                if simType == 'generic':
                    idPat = modelAdaptor.Idpat
                elif template == 'willis':
                    idPat = template
                else:
                    idPat = simType
                if not os.path.exists('Results/%s' % idPat):
                    os.mkdir('Results/%s' % idPat)
                    os.mkdir('Results/%s/json' % idPat)
                    shutil.copytree('Results/css', 'Results/%s/css' % idPat)
                    shutil.copytree('Results/js', 'Results/%s/js' % idPat)
                    shutil.copy('Results/results.html', 'Results/%s/results.html' % idPat)

            '''Mesh generation, XML Network Graph is needed for creating XML Network Mesh.'''
            meshGenerator = MeshGenerator()
            meshGenerator.SetNetworkGraph(networkGraph)
            networkMesh = NetworkMesh()
            meshGenerator.SetNetworkMesh(networkMesh)
            meshGenerator.SetMaxLength(5.0e-2)
            meshGenerator.GenerateMesh()
            
        '''Setting Boundary Conditions Mesh input and reading XML Boundary Conditions File'''
        boundaryConditions = BoundaryConditions()
        boundaryConditions.SetSimulationContext(simulationContext)
        boundaryConditions.SetNetworkMesh(networkMesh)
        boundaryConditions.ReadFromXML(xmlboundpath, xsdboundpath)
        boundaryConditions.SetSpecificCardiacOutput()
        
        
        '''In case of a generic simulation, patient-specific generated files will be moved to Results folder.'''
        if simType == 'generic' and day < 0:
            shutil.move(os.path.abspath(xmlnetpath),('Results/%s/%s_pre_vascular_network.xml' % (idPat,idPat)))
            shutil.move(os.path.abspath(xmlboundpath),('Results/%s/%s_pre_boundary_conditions.xml' % (idPat,idPat)))
            shutil.move(os.path.abspath(csvfilepath),('Results/%s/%s_pre_patient_specific.csv' % (idPat,idPat)))
        if simType == 'generic' and day == 0:
            shutil.copy(os.path.abspath(xmlnetpath),('Results/%s/%s_post_vascular_network.xml' % (idPat,idPat)))
            shutil.copy(os.path.abspath(xmlboundpath),('Results/%s/%s_post_boundary_conditions.xml' % (idPat,idPat)))
            shutil.copy(os.path.abspath(csvfilepath),('Results/%s/%s_post_patient_specific.csv' % (idPat,idPat)))
        if simType == 'generic' and day > 0 and day == days:
            shutil.move(os.path.abspath(xmlnetpath),('Results/%s/%s_adapted_vascular_network.xml' % (idPat,idPat)))
            shutil.move(os.path.abspath(xmlboundpath),('Results/%s/%s_adapted_boundary_conditions.xml' % (idPat,idPat)))
            shutil.move(os.path.abspath(csvfilepath),('Results/%s/%s_adapted_patient_specific.csv' % (idPat,idPat)))
        
        '''Setting Evaluator'''
        evaluator.SetNetworkGraph(networkGraph)
        evaluator.SetNetworkMesh(networkMesh)

        '''Adaptation Model'''
        adaptation.SetBoundaryConditions(boundaryConditions)
        adaptation.SetSimulationContext(simulationContext)
        preRun = adaptation.Adapt(day)
        if len(daysList) > 1:
            print "Day %d " % (day * 10)  # one step represents 10 days

        ''' Setting Solver Class'''
        solver = SolverFirstTrapezoid()  
        solver.SetNetworkMesh(networkMesh)
        solver.SetBoundaryConditions(boundaryConditions)
        solver.SetSimulationContext(simulationContext)
        solver.SetEvaluator(evaluator)
    
        '''Pre-run'''
        if preRun is True:
            solver.SetSteadyFlow()
            print "Steady Pre-Run, setting non-linear parameters"
            solver.Solve()
            parametersToLinear = ["Radius","Compliance"]
            for el in networkMesh.Elements:
                el.SetLinearValues(parametersToLinear)
            networkMesh.checkLinearConsistence()
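            # NOTE (interpretation): the steady pre-run computes an operating point so that
            # Radius and Compliance can be frozen to linear values for the pulsatile run below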
    
        '''Run'''
        evaluator.ExpressionCache = {}
        solver = SolverFirstTrapezoid()
        solver.SetNetworkMesh(networkMesh)
        solver.SetBoundaryConditions(boundaryConditions)
        solver.SetSimulationContext(simulationContext)
        solver.SetEvaluator(evaluator) 
        solver.SetPulseFlow()
        print "Solving system"
        solver.Solve()

        '''Post Processing: Setting Solutions input and plotting some information and/or writing solutions to XML Solutions File'''
        '''User can choose two different post processing strategies. Saving images using matplotlib or visualize results in its browser'''

        '''If needed, pyNS writes xml mesh file'''
        if xmlMesh is True:
            meshdirpath = os.path.join(odir,str(day))
            if not os.path.exists(meshdirpath):
                os.mkdir(meshdirpath)
            xmlmeshpath = os.path.join(meshdirpath,mesh)
            outdirpath = os.path.join(odir,str(day))
            if not os.path.exists(outdirpath):
                os.mkdir(outdirpath)
            xmloutpath = os.path.join(outdirpath,xmlout)
            networkMesh.WriteToXML(xmlmeshpath)
    
        '''Setting NetworkSolutions'''
        print "->100%, Running post-processing"
        networkSolutions = NetworkSolutions()
        networkSolutions.SetNetworkMesh(networkMesh)
        networkSolutions.SetNetworkGraph(networkGraph)
        networkSolutions.SetSimulationContext(simulationContext)
        networkSolutions.SetSolutions(solver.Solutions) 
        networkSolutions.WriteJsonInfo(days,networkMesh.Elements,idPat)
        adaptation.SetSolutions(day, networkSolutions)
        adaptation.SetRefValues(day, networkMesh)
    
        '''If needed, pyNS creates images subdirectory(s) for each adaptation step.'''
        if plotImages is True:
            daystr = str(day)+'/'
            f_dayImages = os.path.join(f_images,daystr)   
            p_dayImages = os.path.join(p_images,daystr)
            w_dayImages = os.path.join(w_images,daystr)
            o_dayImages = os.path.join(o_images,daystr)
            if not os.path.exists(images):
                os.mkdir(images)
            if not os.path.exists(f_dayImages):
                os.mkdir(f_dayImages)
            if not os.path.exists(p_dayImages):
                os.mkdir(p_dayImages)
            if not os.path.exists(w_dayImages):
                os.mkdir(w_dayImages)
            if not os.path.exists(o_dayImages):
                os.mkdir(o_dayImages)
            networkSolutions.SetImagesPath({'im':images,'f':f_dayImages,'p':p_dayImages,'w':w_dayImages,'o':o_dayImages})    
        
        '''If needed, pyNS creates output subdirectory(s) for each adaptation step.'''       
        if writeFlow is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day)+'/'
            f_dayOutput = os.path.join(ofdir,daystr) 
            if not os.path.exists(f_dayOutput):
                os.mkdir(f_dayOutput)
        if writePressure is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day)+'/'
            p_dayOutput = os.path.join(opdir,daystr) 
            if not os.path.exists(p_dayOutput):
                os.mkdir(p_dayOutput)
        if writeWss is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day)+'/'
            w_dayOutput = os.path.join(owdir,daystr)
            if not os.path.exists(w_dayOutput):
                os.mkdir(w_dayOutput)
        if writeReynolds is True:
            if day == -1:
                daystr = 'pre/'
            else:
                daystr = str(day)+'/'
            o_dayOutput = os.path.join(oodir,daystr) 
            if not os.path.exists(o_dayOutput):
                os.mkdir(o_dayOutput)
        
        '''If needed, pyNS writes xml Solution file.'''
        if xmlSol is True:
            networkSolutions.WriteToXML(xmloutpath)
    
        '''Post process solution for each element of the network'''  
        for element in networkMesh.Elements:  
            if element.Type == 'WavePropagation' or element.Type == 'Resistance':
                networkSolutions.WriteJson(element.Id, day, excludeWss, idPat)
                if velocityProfile is True:
                    networkSolutions.SaveVelocityProfile(element,str(day))
                if plotFlow is True:
                    networkSolutions.PlotFlow(element.Id)
                if plotPressure is True:
                    networkSolutions.PlotPressure(element.Id)
                if plotWss is True:
                    networkSolutions.PlotWSS(element)
                if plotReynolds is True:
                    networkSolutions.PlotReynolds(element.Id)
                if writeFlow is True:
                    networkSolutions.WriteFlowOutput(element.Id,f_dayOutput+'Flow_'+element.Name+'.txt')
                if writePressure is True:
                    networkSolutions.WritePressureInput(element.Id,p_dayOutput+'/p_in_'+element.Name+'.txt')
                    networkSolutions.WritePressureOutput(element.Id,p_dayOutput+'/p_out_'+element.Name+'.txt')
                    networkSolutions.WritePressureDrop(element.Id,p_dayOutput+'/p_drop_'+element.Name+'.txt')
                if writeWss is True:
                    networkSolutions.WriteWSSOutput(element.Id,w_dayOutput+'WSS_'+element.Name+'.txt')
                if writeReynolds is True:
                    networkSolutions.WriteReynolds(element.Id,o_dayOutput+'Reynolds'+element.Name+'.txt')
                
    '''Adaptation data'''
    if days > 0:
        networkSolutions.WriteJsonAdapt(adaptation, idPat)
        if writeCsv is True:
            networkSolutions.WriteToCsv(adaptation, 'Diameter')
            networkSolutions.WriteToCsv(adaptation, 'Pressure')
            networkSolutions.WriteToCsv(adaptation, 'Flow')
            networkSolutions.WriteToCsv(adaptation, 'Wss')
    
    '''Export GNUID'''
    if inputGnuid:
        networkSolutions.GetGnuidInformation(idPat, inputGnuid)
     
    print "\nJOB FINISHED"
    if automaticResults:
        try:
            shutil.copytree('Results/%s/json' % idPat,'Results/json',symlinks=True)
        except OSError:
            shutil.rmtree('Results/json')
            shutil.copytree('Results/%s/json' % idPat,'Results/json',symlinks=True)
        print "Starting webServer for post-processing results. Close it with CTRL-C."
        webbrowser.open_new_tab(ip+'/Results/results.html')
        httpd.serve_forever()
Example No. 36
0
def evaluate(logger, graph, database_dir, test_data_dir, num_similar, treshold):
    """ Evaluates the classifier frozen in graph using the images of
        database_dir for comparison and the images of test_data_dir
        for evaluation. The parameters num_similar and treshold describe
        how many of the most similar images should be considered for
        construction of the convex hull and which images to consider
        similar at all.
    """
    evaluator = Evaluator(logger, graph, database_dir, NN_START_RELIABILITY, NN_RELIABILITY_DELTA)

    iterations = 1
    nn_reliability = 100
    
    datasetStart = datetime.datetime.now()
    sum_classification_time = 0
    min_classification_time = 100000
    max_classification_time = 0
    sum_prediction_time = 0
    min_prediction_time = 100000
    max_prediction_time = 0
    sum_error = 0
    min_error = 100000
    max_error = 0
    
    # Write CSV file header    
    HEADER = "Data set;N;D_N;D_%;C_Min;C_Avg;C_Max;P_Min;P_Avg;P_Max;E_Min;E_Avg;E_Max;TP;FP\n"
    if not os.path.isfile(CSV_FILE):
        with open(CSV_FILE, "a") as myfile:
            myfile.write(HEADER)
        
    # Start estimation
    logger.info("[Main] Base NN reliability is: " + str(nn_reliability))
    differences = []
    for filename in sorted(os.listdir(test_data_dir)):
        if filename.endswith(".png"):
            path = test_data_dir + filename
            failure, error, c_time, p_time = evaluator.evaluate_nn_for_image(path, IM_RESIZE_HEIGHT, IM_RESIZE_WIDTH)
            if failure is not None:
                differences.append(failure)
           
            min_classification_time = min(min_classification_time, c_time)
            max_classification_time = max(max_classification_time, c_time)
            sum_classification_time = sum_classification_time + c_time
            
            min_prediction_time = min(min_prediction_time, p_time)
            max_prediction_time = max(max_prediction_time, p_time)
            sum_prediction_time = sum_prediction_time + p_time
            
            min_error = min(min_error, error)
            max_error = max(max_error, error)
            sum_error = sum_error + error
            
            logger.info("[Main] NN reliability after seeing " + str(iterations) + " files is now: " + str(evaluator.get_nn_reliability()))
            iterations += 1
    
    num_images = max(iterations - 1, 1)  # `iterations` starts at 1, so subtract the initial offset
    datasetDuration         = (datetime.datetime.now() - datasetStart).total_seconds()
    difference_quota        = float(len(differences))        / num_images
    avg_classification_time = float(sum_classification_time) / num_images
    avg_prediction_time     = float(sum_prediction_time)     / num_images
    avg_error               = float(sum_error)               / num_images
    
    logger.info("[Main] Resulting NN reliability is: " + str(evaluator.get_nn_reliability()))
    logger.info("[Main] NN and predictor differ in " + str(difference_quota) + " %: " + str(differences))
    logger.info("[Main] Overall data set processing time   = " + str(datasetDuration) + " s")
    logger.info("[Main] Classfication times (min,mean,max)s  = (" + str(min_classification_time) + ", " + str(avg_classification_time) + ", " + str(max_classification_time) + ")")
    logger.info("[Main] Prediction times    (min,mean,max)s  = (" + str(min_prediction_time) + ", " + str(avg_prediction_time) + ", " + str(max_prediction_time) + ")")
    logger.info("[Main] Absolute errors     (min,mean,max)px = (" + str(min_error) + ", " + str(avg_error) + ", " + str(max_error) + ")")
    
    line = test_data_dir + ";" \
         + str(iterations-1) \
         + ";" + str(len(differences)) \
         + ";" + str(difference_quota) \
         + ";" + str(min_classification_time) \
         + ";" + str(avg_classification_time) \
         + ";" + str(max_classification_time) \
         + ";" + str(min_prediction_time) \
         + ";" + str(avg_prediction_time) \
         + ";" + str(max_prediction_time) \
         + ";" + str(min_error) \
         + ";" + str(avg_error) \
         + ";" + str(max_error) \
         + "\n"
         
    logger.info("[Main] For CSV <" + line + ">")
    
    with open(CSV_FILE, "a") as myfile:
        myfile.write(line)
    
    if len(differences) > 0:
        # Report differences
        failure_dir = test_data_dir.replace('/test-data','/tmp')
        # Maybe create output dir
        if not os.path.exists(failure_dir):
            os.makedirs(failure_dir)
        logger.info("[Main] Reporting " + str(len(differences)) + " differences to " + failure_dir) 
        for diff in differences:
            path = diff[0]
            expectation = diff[1]
            x = diff[2]
            # Load differencing image
            img = cv2.imread(path,0)
            # Annotate and save image
            failure_path = path.replace('.png', '.error.png').replace('/test-data','/tmp')
            logger.info("[Main] Reporting failed image to " + failure_path) 
            cv2.circle(img, (x,0), 5, (255,255,255), -1)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img,expectation,(30,260), font, 1,(255,255,255), 2)
            cv2.imwrite(failure_path, img)
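
A hypothetical invocation sketch; the logger setup, the frozen-graph loader, the paths, and the argument values below are assumptions, not part of the original project.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("evaluation")

# load_frozen_graph is a hypothetical helper that returns the frozen classifier graph
graph = load_frozen_graph("frozen_classifier.pb")
evaluate(log, graph, "database/", "test-data/run1/", num_similar=5, treshold=0.8)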
Example No. 37
0
class BPRMF(InputData):
    '''
    BPRMF: implicit matrix factorization, Bayesian Personalized Ranking
    '''
    def __init__(self, train_file, test_file, topK=20, num_factor=30, num_iteration=10, learning_rate=0.05, bias_reg_param = 1, reg_param = 0.0025, \
                 neg_reg_param = 0.00025, num_neg_sample=10):
        InputData.__init__(self, train_file, test_file)
        num_item = len(self.item_hash)
        num_user = len(self.uid_hash)
        self.topK = topK
        self.num_item = num_item
        self.num_user = num_user
        self.num_factor = num_factor
        self.num_iteration = num_iteration
        self.learning_rate = learning_rate
        self.bias_reg_param = bias_reg_param
        self.reg_param = reg_param
        self.neg_reg_param = neg_reg_param
        self.num_neg_sample = num_neg_sample

        self.counts = None
        self.uid_predict = {}
        self.uid_recommend = {}
        self.evaluator = None
        self.user_vectors = np.random.random_sample((self.num_user, self.num_factor))
        self.item_vectors = np.random.random_sample((self.num_item, self.num_factor))
        self.item_bias = np.zeros(self.num_item)
        print "number of user % i" % self.num_user
        print "number of item %i " % self.num_item

        self.recommend()

    def recommend(self, ):
        self.__train_model()
        for i in xrange(self.num_user):
            self.uid_predict[i] = {}
            for j in xrange(self.num_item):
                if j not in self.train_tuple[i]:
                    self.uid_predict[i][j] = np.dot(self.user_vectors[i], self.item_vectors[j]) + self.item_bias[j]
            predict = self.uid_predict[i]
            predict = sorted(predict.iteritems(), key=lambda e: e[1], reverse=True)
            recommend_result = predict[:self.topK]
            recommend_result = [elem[0] for elem in recommend_result]
            self.uid_recommend[i] = recommend_result

    def evaluation(self, ):
        self.evaluator = Evaluator(self.test_tuple, self.uid_recommend, self.num_user, self.num_item, self.topK)
        self.evaluator.prec_recall()

    def __update(self, u, i, j):
        x = self.item_bias[i] - self.item_bias[j] + np.dot(self.user_vectors[u, :], self.item_vectors[i, :]-self.item_vectors[j, :])
        if x > 9:
            z = 0
        elif x < -9:
            z = 1
        else:
            z = 1.0 / (1.0 + exp(x))
        #update parameters
        self.item_bias[i] += self.learning_rate * (z - self.bias_reg_param * self.item_bias[i])
        self.item_bias[j] += self.learning_rate * (-z - self.bias_reg_param * self.item_bias[j])
        self.user_vectors[u, :] += self.learning_rate * ((self.item_vectors[i, :] - self.item_vectors[j, :]) * z - self.reg_param * self.user_vectors[u, :])
        self.item_vectors[i, :] += self.learning_rate * (self.user_vectors[u, :] * z - self.reg_param * self.item_vectors[i, :])
        self.item_vectors[j, :] += self.learning_rate * (-self.user_vectors[u, :] * z - self.neg_reg_param * self.item_vectors[j, :])

    def __iteration(self):
        # One SGD pass: for every observed (u, i) pair, draw num_neg_sample
        # items the user has not interacted with as negatives and update.
        for u in xrange(self.num_user):
            for i in self.train_tuple[u]:
                num_sample = self.num_neg_sample
                while num_sample:
                    num_sample -= 1
                    j = np.random.randint(0, self.num_item)
                    while j in self.train_tuple[u]:
                        j = np.random.randint(0, self.num_item)
                    self.__update(u, i, j)

    def __random_iteration(self):
        # Same pass as __iteration, but over a shuffled list of (u, i) pairs.
        user_item_pairs = []
        for u in xrange(self.num_user):
            for i in self.train_tuple[u]:
                user_item_pairs.append((u, i))
        random.shuffle(user_item_pairs)
        for u, i in user_item_pairs:
            num_sample = self.num_neg_sample
            while num_sample:
                num_sample -= 1
                j = np.random.randint(0, self.num_item)
                while j in self.train_tuple[u]:
                    j = np.random.randint(0, self.num_item)
                self.__update(u, i, j)
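    # Note: the shuffle decorrelates consecutive SGD updates; __iteration()
    # above always visits users in id order, so its updates within a pass
    # arrive in a fixed, non-random order.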


    def __train_model(self):
        for i in xrange(self.num_iteration):
            t0 = time.time()
            self.__iteration()
            print 'iteration %i took %.2f seconds' % (i + 1, time.time() - t0)
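
# A minimal usage sketch (hypothetical file names; InputData defines the
# expected train/test file format):
#
#   model = BPRMF('train.dat', 'test.dat', topK=20, num_factor=30)
#   model.evaluation()  # precision/recall at topK via Evaluator.prec_recall()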
Exemplo n.º 38
0
# Assumed imports for this snippet; MovieLens and the colour constants
# (BeginGREEN, EndGREEN, BeginBgBLUE, EndBLUE) come from the surrounding project.
from MovieLens import MovieLens
from Evaluator import Evaluator
from surprise import KNNBasic, NormalPredictor
import numpy as np
import random


def LoadMovieLensData():
    ml = MovieLens()
    print(BeginGREEN + "Loading movie ratings..." + EndGREEN)
    data = ml.loadMovieLensLatestSmall()
    print(BeginGREEN + "Computing movie popularity ranks so we can measure novelty later..." + EndGREEN)
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)

np.random.seed(0)
random.seed(0)

# Load up common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

# Construct an Evaluator to compare the algorithms
evaluator = Evaluator(evaluationData, rankings)

# User-based KNN
UserKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': True})
evaluator.AddAlgorithm(UserKNN, BeginBgBLUE + "User KNN" + EndBLUE)

# Item-based KNN
ItemKNN = KNNBasic(sim_options={'name': 'cosine', 'user_based': False})
evaluator.AddAlgorithm(ItemKNN, BeginBgBLUE + "Item KNN" + EndBLUE)

# Just make random recommendations
Random = NormalPredictor()
evaluator.AddAlgorithm(Random, BeginBgBLUE + "Random" + EndBLUE)

# Fight! (the False argument appears to skip the extra top-N metrics pass;
# compare Evaluate(True) in the following example)
evaluator.Evaluate(False)
Exemplo n.º 39
0
from MovieLens import MovieLens
from Evaluator import Evaluator
from surprise import KNNBasic
from time import time


def LoadMovieLensData():
    ml = MovieLens()
    print('Loading movie ratings...')
    data = ml.loadMovieLensDataset()
    #Compute movie popularity ranks to measure novelty
    rankings = ml.getPopularityRanks()
    return (ml, data, rankings)


#Load the common data set for the recommender algorithms
(ml, evaluationData, rankings) = LoadMovieLensData()

#create an evaluator which is an object of Evaluator class
evaluator = Evaluator(evaluationData, rankings)

t0 = time()
#User-Based KNN
sim_options_user = {'name': 'cosine', 'user_based': True}
userKNN = KNNBasic(sim_options=sim_options_user)
evaluator.AddAlgorithm(userKNN, "User KNN")

evaluator.Evaluate(True)

evaluator.SampleTopNRecs(ml)
tt = time() - t0
print("User based CF Model trained in %s seconds" % round(tt, 3))
Exemplo n.º 40
0
#
# Min Lee
# [email protected]
# MacOS
# Python
#
# In accordance with the class policies and Georgetown's Honor Code,
# I certify that, with the exceptions of the class resources and those
# items noted below, I have neither given nor received any assistance
# on this project.
#


import sys
from Classifier import backprop
from Evaluator import Evaluator

classifier = backprop(sys.argv)
evaluator = Evaluator(sys.argv)
performance = evaluator.evaluate(classifier, sys.argv)

print performance
Exemplo n.º 41
0
    def evaluation(self):
        self.evaluator = Evaluator(self.test_tuple, self.uid_recommend, self.num_user, self.num_item, self.topK)
        self.evaluator.prec_recall()
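
# A minimal sketch of what prec_recall() presumably computes, assuming
# test_tuple maps each user to its held-out items and uid_recommend maps each
# user to its topK list. This is an illustration, not the actual Evaluator code:
def prec_recall_sketch(test_tuple, uid_recommend, num_user, topK):
    hits = 0
    num_test = 0
    for u in xrange(num_user):
        relevant = set(test_tuple.get(u, []))
        recommended = uid_recommend.get(u, [])[:topK]
        hits += len(relevant & set(recommended))
        num_test += len(relevant)
    # precision@K averages hits over all recommended slots; recall@K averages
    # them over all held-out test items.
    precision = float(hits) / (num_user * topK) if num_user and topK else 0.0
    recall = float(hits) / num_test if num_test else 0.0
    return precision, recall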