def __init__(self, *args, **kwargs):
    """Configure a model from its config file and register it with the shared manager.

    Reads this model's attribute block from the config file named by
    ``kwargs['filename']``, builds the underlying model via ``ModelInit``,
    and registers it either as a plain model or — when its configured type
    is ``'connection'`` — as a connection model.
    """
    # Setup log
    self.logger = logging.getLogger(__name__)
    super(ModelIO, self).__init__()
    # Reference tables; presumably filled in by setModelRefs() below — TODO confirm.
    self._subRefs = {}
    self._objRefs = {}
    self._modelRefs = {}
    self._modelName = kwargs.get('modelName')
    # Kinds of runtime objects recognised by this model layer.
    self._objectList = ['queue', 'thread', 'semaphore', 'event', 'subscribe']
    self._ModelConfig = ModelConfig(kwargs.get('filename'))  # *args,**kwargs)
    self._modelManager = ModelManager()
    # Unique id for this instance (time-based UUID collapsed to an int).
    self._id = uuid.uuid1().int
    self._modelAttributes = self._ModelConfig.getModelAttributes(self.getModelName())
    # Optional success/failure hooks and the model type come straight from config.
    self._OnSuccess = self.getModelAttributes().get('OnSuccess')
    self._OnFailure = self.getModelAttributes().get('OnFailure')
    self._model_type = self.getModelAttributes().get('type')
    self._model = ModelInit(modelId=self.getId(), *args, **kwargs, **self._modelAttributes,
                            ports=self._ModelConfig.getModelInputPorts(self.getModelName()))
    self.setModelRefs()
    if (self.getModelType() == 'connection'):
        # Connection models are registered against their connection object
        # rather than by model name.
        self._modelManager.setConnectionRefs(modelRefs=self.getModelRefs(), connectionObject=kwargs.get('connectionObject'))
        self._modelManager.setConnectionModels(model=self.getModel(), connectionObject=kwargs.get('connectionObject'))
    else:
        self._modelManager.setModelRefs(objRefs=self.getObjRefs(), modelName=self.getModelName())
        self._modelManager.setModels(model=self.getModel(), modelName=self.getModelName())
def __init__(self):
    """Load the Yahoo dataset, movie metadata, and all persisted models."""
    dataset = YahooDataset()
    self.yd = dataset
    self.movieID_to_info = dataset.loadMovies()
    self.fullTrainSet = dataset.loadFullSet()
    self.models = ModelManager().getAllModels()
    # Override the training set's rating scale to this dataset's 1..13 range.
    self.fullTrainSet.rating_scale = (1, 13)
def initGui():
    """Initialise battle state on the Builder singleton.

    Does nothing unless the builder is in training or replay mode.
    """
    if not Builder.isTraining() and not Builder.isReply():
        return
    Builder.mm = ModelManager(Builder.getMapName())
    # NOTE(review): this reads Builder.pm immediately after assigning
    # Builder.mm — confirm pm is initialised elsewhere and mm/pm is not a typo.
    Builder.currentPath = Builder.pm.getNextPath()
    Builder.inBattle = True
    Builder.moving = False
    Builder.blockMove = False
def deleteModel(self, algo, model):
    """Delete a persisted model file and drop it from the in-memory registry.

    Parameters
    ----------
    algo : str
        Algorithm key; used as the subdirectory under ``models/``.
    model : str
        Model (file) name within that algorithm's directory.

    Returns
    -------
    str
        ``"success"`` when the file was removed, ``"error"`` when the
        filesystem delete failed (the registry is then left untouched).
    """
    path = "/".join(("models", algo, model))
    try:
        ModelManager().deleteModel(path)
    except OSError:
        # Delete failed (missing file, permissions, ...): report it and keep
        # the in-memory registry consistent with what is still on disk.
        return "error"
    self.models[algo].remove(model)
    return "success"
def genetic_magic(symbol_sequences, true_probs):
    """Evolve models over the symbol sequences and score the best one against true_probs."""
    # One model per input sequence in the initial population.
    population_size = len(symbol_sequences)
    manager = ModelManager(
        symbol_sequences,
        number_of_models=population_size,
        export_fitness_fnc_results=EXPORT_FITNESS_RESULTS,
    )
    # Evolve for 10 epochs.
    manager.run(10)
    # Probability distribution predicted by the fittest model.
    predicted = manager.get_best_model_probility()
    return get_g_score(predicted, true_probs)
def __init__(self, *args, **kwargs):
    """Create every model named in the config file, wire their connections, and start their threads."""
    # Setup log configurations
    logSetup.logSetup()
    self.logger = logging.getLogger(__name__)
    # Parse the model configuration passed as kwargs['filename'].
    self._ModelConfig = ModelConfig(kwargs.get('filename'))
    self._modelNames = self._ModelConfig.getModels()
    self._ModelCreate = ModelCreate.ModelCreate(*args, **kwargs)
    # Event flag — presumably signals model-initialisation completion; TODO confirm.
    self._modelInit = threading.Event()
    # Order matters: models must exist before connections between them
    # can be created, and threads start only once everything is wired.
    self.createModels(*args, **kwargs)
    self._ModelManager = ModelManager()
    self.createConnections(*args, **kwargs)
    self.startThreads()
def __init__(self, key):
    """Constructor for a client object """
    self.key = key
    # One manager per API area, all sharing the same credentials key.
    self.dictionaryManager = DictionaryManager(key)
    self.entityManager = EntityManager(key)
    self.conceptManager = ConceptManager(key)
    self.modelManager = ModelManager(key)
    self.categoryManager = CategoryManager(key)
    # Default parameter values for /semantic_tagging
    self.fields = ''
    self.filter_data = 'y'
    # Default parameter values for /check
    self.mode = 'all'
    self.group_errors = '2'
    self.check_spacing = 'n'
        self.recomHeavy = self.recommendedLevel(maxHeavy)
        # NOTE(review): the adjacent string literals below join without spaces
        # ("thisstandard", "impactreduction") — confirm intended wording.
        message = "The minimum legal standard for light and heavy impact sound is 50 and 58dB. To satisfy this" \
                  "standard, technology with light impact reduction grade of " + str(self.recomLight) + "and heavy impact" \
                  "reduction grade of " + str(self.recomHeavy) + " is required. Do you want to add this to the searching condition?"
        self.messageUI.textBrowserMessage.setText(message)
        self.messageDialog.show()

    def recommendedLevel(self, intensity):
        # Map an impact-sound intensity to a recommended grade:
        # <20 -> 4, <40 -> 3, <60 -> 2, otherwise 1 (higher intensity,
        # lower grade number).
        if intensity < 20:
            return 4
        if intensity < 40:
            return 3
        if intensity < 60:
            return 2
        return 1

    def connectComboBoxSliderAndLabel(self, comboBox, slider, label):
        # Re-initialise the slider whenever the combo-box selection changes.
        comboBox.currentIndexChanged.connect(
            lambda: self.initSlider(slider, comboBox, label))
        # NOTE(review): the default argument freezes slider.value() at connect
        # time; sliderMoved emits the new position which overrides it — confirm
        # this is the intended behaviour.
        slider.sliderMoved.connect(
            lambda text=str(slider.value()): self.initLabel(label, text))


if __name__ == "__main__":
    data = DBManager()
    model = ModelManager(data, "techs")
    app = QApplication(sys.argv)
    interface = Interface(model)
    interface.show()
    sys.exit(app.exec_())
# Prediction entry point: load a saved checkpoint and classify one image.
import warnings

warnings.filterwarnings("ignore")

from PIL import Image

# torch
import torch
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
from torch import nn

# Parameters loading
from AppParametersLoader import AppParametersLoader
parameters = AppParametersLoader()
parameters.print_all()

# Data loading
from DataManager import DataManager
data_manager = DataManager()

# Previously saved model loading
from ModelManager import ModelManager
chk_path = parameters.save_dir() + '/checkpoint.pth'
model_manager = ModelManager(parameters.gpu())
model_manager.load_model(chk_path)

# Prediction: top-k probabilities and class labels for the input image.
image_to_predict = data_manager.process_image(parameters.image_path())
top_ps, top_classes = model_manager.predict(image_to_predict, topk=parameters.top_k())

# Map predicted class labels to human-readable category names.
cat_to_name = data_manager.get_cat_to_name(parameters.category_names_path())
category_names = [cat_to_name[c] for c in top_classes]

# Fixed "Predicition" typo in the user-facing output.
print(f"Prediction: probabilities: {top_ps} ")
print(f"Prediction: classes: {category_names} ")
def recommend(self, params):
    """Produce top-N recommendations for one user with the requested algorithm.

    Parameters
    ----------
    params : dict
        Expects at least ``"user"`` and ``"algorithm"``. Per algorithm it
        may carry hyper-parameters (``"rr"``, ``"lr"``, ``"ne"``, ``"ni"``,
        ``"factors"``, ``"k"``), ``"name"`` to persist a freshly trained
        model, or ``"models"`` to load a previously saved one instead of
        training.

    Returns
    -------
    list
        ``(additional_movie_data, estimated_rating_or_empty_string)`` pairs.
    """
    user = self.fullTrainSet.to_inner_uid(params["user"])
    # Items this user has not rated yet — the candidate pool to rank.
    antiTestSet = self._buildAntiTestSetForUser(user)
    algo = params["algorithm"]
    path = "models/" + algo
    if algo == "svd":
        # No "models" key: train a new model from the request's hyper-parameters.
        if "models" not in params.keys():
            args = {
                "random_state": 0,
                "reg_all": float(params["rr"]),
                "lr_all": float(params["lr"]),
                "n_epochs": int(params["ne"]),
                "n_factors": int(params["factors"])
            }
            svd = SVD(**args)
            svd = svd.fit(self.fullTrainSet)
            predictions = svd.test(antiTestSet)
            # Optionally persist the freshly trained model under params["name"].
            if "name" in params.keys():
                mm = ModelManager()
                name = params["name"]
                path = path + "/" + name
                mm.saveModel(svd, path)
                self.models[algo].append(name)
        else:
            # Load a previously saved model instead of retraining.
            mm = ModelManager()
            model = params["models"]
            path = path + "/" + model
            svd, _ = mm.loadModel(path)
            predictions = svd.test(antiTestSet)
        topN = self._getTopNForUser(predictions)
        topN = [(self.getAdditionalData(movieId), int(round(estimated, 0))) for movieId, estimated in topN]
    elif algo == "knnItemBaseline":
        # Item-based KNN with baseline ratings; same train-or-load pattern.
        if "models" not in params.keys():
            args = {
                "sim_options": {'name': 'cosine', 'user_based': False},
                "k": int(params["k"])
            }
            knn = KNNBaseline(**args)
            knn = knn.fit(self.fullTrainSet)
            predictions = knn.test(antiTestSet)
            if "name" in params.keys():
                mm = ModelManager()
                name = params["name"]
                path = path + "/" + name
                mm.saveModel(knn, path)
                self.models[algo].append(name)
        else:
            mm = ModelManager()
            model = params["models"]
            path = path + "/" + model
            knn, _ = mm.loadModel(path)
            predictions = knn.test(antiTestSet)
        topN = self._getTopNForUser(predictions)
        topN = [(self.getAdditionalData(movieId), int(round(estimated, 0))) for movieId, estimated in topN]
    elif algo == "weightedHybrid":
        # Fixed-hyper-parameter blend of SVD and item KNN; never persisted.
        svd = SVD(random_state=0, reg_all=0.1, lr_all=0.003, n_factors=30, verbose=False)
        knn = KNNBaseline(sim_options={'name': 'cosine', 'user_based': False}, k=150)
        weightedHybrid = WeightedHybridAlgorithm(svd, knn, weights=[0.6, 0.4])
        weightedHybrid.fit(self.fullTrainSet)
        predictions = weightedHybrid.test(antiTestSet)
        topN = self._getTopNForUser(predictions)
        topN = [(self.getAdditionalData(movieId), int(round(estimated, 0))) for movieId, estimated in topN]
    elif algo == "userCollaborative":
        # User-based KNN; estimates are deliberately blanked in the output.
        if "models" not in params.keys():
            args = {
                "k": int(params["k"]),
                "sim_options": {'name': 'cosine', 'user_based': True}
            }
            knn = knnRecAlgorithm(**args)
            knn = knn.fit(self.fullTrainSet)
            predictions = knn.test(antiTestSet)
            if "name" in params.keys():
                mm = ModelManager()
                name = params["name"]
                path = path + "/" + name
                mm.saveModel(knn, path)
                self.models[algo].append(name)
        else:
            mm = ModelManager()
            model = params["models"]
            path = path + "/" + model
            knn, _ = mm.loadModel(path)
            predictions = knn.test(antiTestSet)
        topN = self._getTopNForUser(predictions, minimumRating=0.0)
        # topN = [(self.getAdditionalData(movieId), round(estimated, 2)) for movieId, estimated in topN]
        topN = [(self.getAdditionalData(movieId), "") for movieId, estimated in topN]
    elif algo == "bpr":
        # Bayesian Personalised Ranking; uses its own save/load helpers and
        # recommends directly from the model rather than via predictions.
        if "models" not in params.keys():
            args = {
                "reg": float(params["rr"]),
                'learning_rate': float(params["lr"]),
                'n_iters': int(params["ni"]),
                'n_factors': int(params["factors"]),
                'batch_size': 100
            }
            bpr = BPRecommender(args)
            bpr = bpr.fit()
            if "name" in params.keys():
                mm = ModelManager()
                name = params["name"]
                path = path + "/" + name
                mm.saveBprModel(bpr, path)
                self.models[algo].append(name)
        else:
            mm = ModelManager()
            model = params["models"]
            path = path + "/" + model
            bpr = mm.loadBprModel(path)
        topN = bpr.recommend(user)
        topN = [(self.getAdditionalData(movieId), "") for movieId in topN]
    return topN
Created on Sun Dec 13 20:52:09 2020

@author: Dimo
"""
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from resnet18 import Resnet18
from DS import LoadData
from ModelManager import ModelManager

if __name__ == "__main__":
    manager = ModelManager(ModelRoot='./model')
    # train for the known model
    manager.train_known(1, 30, 40)
    manager.validate()
    for i in range(3):
        # start unknown part
        print('Calculate sample probability...')
        probs = manager.predict_probability(10000)  # budget of searching
        # print(len(temp[1]))
        # E = sum_c p * log p per sample (negative entropy); lower E means
        # a more uncertain prediction.
        logprob = torch.log(probs[1])
        E = logprob * probs[1]
        E = E.sum(1)
        probs1 = list(zip(probs[0], E))
        # print(probs1)
        # Ascending by score, so the most uncertain samples come first.
        res = sorted(probs1, key=lambda x: x[1])
# NOTE(review): this chunk starts mid-script — symbol_sequences is defined
# earlier, outside this view, and the final loop continues past it.
sequence_file_path = "./data/generated_data/" + sys.argv[1]
with open(sequence_file_path) as file:
    lines = file.readlines()
for line in lines:
    symbols = line.strip().split(",")
    # Check that the reserved start/end symbols are not used in the data.
    assert 'x' not in symbols and 'o' not in symbols
    symbol_sequences.append(symbols)
print(f"[{len(symbol_sequences)} sequences loaded]")
# create to be estimated from symbol_sequence
manager = ModelManager(symbol_sequences, 10)
# run model epochs
manager.run(10)
# show the probability distribution of the different sequences in the model
pred_probs = manager.get_best_model_probility()
# print(pred_probs)
# Load the ground-truth trace probabilities for comparison.
with open("./data/MODEL_DEFINTION.txt") as file:
    lines = file.readlines()
true_probs = []
for line in lines:
    line = line.strip()
    [trace, prob] = line.split(" ")