Example no. 1
    def __init__(self, *args, **kwargs):
        # Set up logging
        self.logger = logging.getLogger(__name__)

        super(ModelIO, self).__init__()
        self._subRefs = {}
        self._objRefs = {}
        self._modelRefs = {}

        self._modelName = kwargs.get('modelName')
        self._objectList = ['queue', 'thread', 'semaphore', 'event', 'subscribe']
        self._ModelConfig = ModelConfig(kwargs.get('filename'))
        self._modelManager = ModelManager()

        self._id = uuid.uuid1().int
        self._modelAttributes = self._ModelConfig.getModelAttributes(self.getModelName())
        self._OnSuccess = self.getModelAttributes().get('OnSuccess')
        self._OnFailure = self.getModelAttributes().get('OnFailure')
        self._model_type = self.getModelAttributes().get('type')
        self._model = ModelInit(modelId=self.getId(), *args, **kwargs,
                                **self._modelAttributes,
                                ports=self._ModelConfig.getModelInputPorts(self.getModelName()))
        self.setModelRefs()

        if self.getModelType() == 'connection':
            self._modelManager.setConnectionRefs(modelRefs=self.getModelRefs(),
                                                 connectionObject=kwargs.get('connectionObject'))
            self._modelManager.setConnectionModels(model=self.getModel(),
                                                   connectionObject=kwargs.get('connectionObject'))
        else:
            self._modelManager.setModelRefs(objRefs=self.getObjRefs(), modelName=self.getModelName())
            self._modelManager.setModels(model=self.getModel(), modelName=self.getModelName())
Example no. 2
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch FrontNet')
    args = Parse(parser)

    torch.manual_seed(args.seed)

    # [NeMO] Setup of console logging.
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')


    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    train_loader, validation_loader, test_loader = LoadData(args)

    # [NeMO] Loading of the JSON regime file.
    regime = {}
    if args.regime is None:
        raise ValueError("Missing regime JSON.")
    else:
        with open(args.regime, "r") as f:
            rr = json.load(f)
        for k in rr.keys():
            try:
                regime[int(k)] = rr[k]
            except ValueError:
                regime[k] = rr[k]

    if args.gray is not None:
        model = Dronet(PreActBlock, [1, 1, 1], True)
    else:
        model = Dronet(PreActBlock, [1, 1, 1], False)

    # [NeMO] This used to preload the model with pretrained weights.
    if args.load_model is not None:
        ModelManager.Read(args.load_model, model)

    trainer = ModelTrainer(model, args, regime)
    if args.quantize:
        #logging.disable(logging.INFO)
        trainer.Quantize(validation_loader)


    #trainer.Train(train_loader, validation_loader)
    #trainer.Predict(test_loader)

    if args.save_model is not None:
        ModelManager.Write(trainer.GetModel(), 100, args.save_model)
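The `Parse` helper is not shown in this example; a minimal sketch consistent with the attributes the code reads (all flag names here are assumptions, not from the source) could look like this:

# Hypothetical sketch of the Parse helper used above; flag names are inferred
# from the attributes this example reads (args.seed, args.regime, ...).
def Parse(parser):
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--regime', type=str, default=None, help='path to the JSON regime file')
    parser.add_argument('--gray', type=int, default=None, help='use grayscale input when set')
    parser.add_argument('--load-model', dest='load_model', type=str, default=None)
    parser.add_argument('--save-model', dest='save_model', type=str, default=None)
    parser.add_argument('--quantize', action='store_true', help='run quantization on the model')
    return parser.parse_args()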
Example no. 3
class ModelBootstrap(object):
    def __init__(self, *args, **kwargs):
        # Setup log configurations
        logSetup.logSetup()
        self.logger = logging.getLogger(__name__)

        self._ModelConfig = ModelConfig(kwargs.get('filename'))
        self._modelNames = self._ModelConfig.getModels()
        self._ModelCreate = ModelCreate.ModelCreate(*args, **kwargs)
        self._modelInit = threading.Event()
        self.createModels(*args, **kwargs)
        self._ModelManager = ModelManager()
        self.createConnections(*args, **kwargs)
        self.startThreads()

    def createModels(self, *args, **kwargs):
        for modelName in self._modelNames:
            self._ModelCreate.create(modelName=modelName,
                                     model_init_event=self._modelInit,
                                     *args,
                                     **kwargs)

    def createConnections(self, *args, **kwargs):
        for _connection in self._ModelManager.getModelConnections():
            self._ModelCreate.create(modelName='connection',
                                     connectionObject=_connection,
                                     model_init_event=self._modelInit,
                                     *args,
                                     **kwargs)

    async def startParallelThreads(self, _thread, modelName):
        _thread.daemon = True
        _thread.start()
        if _thread.daemon:
            self.logger.debug('Model - %s, Started daemon thread - %s',
                              modelName, _thread.name)
        else:
            self.logger.debug('Model - %s, Started non-daemon thread - %s',
                              modelName, _thread.name)

    def startThreads(self, *args, **kwargs):
        self.logger.info('Starting Threads')
        loop = asyncio.get_event_loop()
        tasks = []
        for modelName, _threads in self._ModelManager.getModelThreads():
            for _thread in _threads:
                tasks.append(
                    asyncio.ensure_future(self.startParallelThreads(
                        _thread, modelName),
                                          loop=loop))
        loop.run_until_complete(asyncio.gather(*tasks,
                                               return_exceptions=False))
        self._modelInit.set()
        self.logger.info('Started all Threads.')
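Note that `startParallelThreads` never awaits anything, so the asyncio machinery adds no real concurrency over calling `thread.start()` directly; a hypothetical synchronous equivalent of `startThreads`:

    # Hypothetical synchronous equivalent of startThreads above; since the
    # coroutine contains no awaits, starting the threads directly behaves the same.
    def startThreadsSync(self):
        self.logger.info('Starting Threads')
        for modelName, _threads in self._ModelManager.getModelThreads():
            for _thread in _threads:
                _thread.daemon = True
                _thread.start()
                self.logger.debug('Model - %s, Started thread - %s', modelName, _thread.name)
        self._modelInit.set()
        self.logger.info('Started all Threads.')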
Example no. 4
    def __init__(self, *args, **kwargs):
        # Setup log configurations
        logSetup.logSetup()
        self.logger = logging.getLogger(__name__)

        self._ModelConfig = ModelConfig(kwargs.get('filename'))
        self._modelNames = self._ModelConfig.getModels()
        self._ModelCreate = ModelCreate.ModelCreate(*args, **kwargs)
        self._modelInit = threading.Event()
        self.createModels(*args, **kwargs)
        self._ModelManager = ModelManager()
        self.createConnections(*args, **kwargs)
        self.startThreads()
Example no. 5
def genetic_magic(symbol_sequences, true_probs):
    # create new model manager
    manager = ModelManager(symbol_sequences,
                           number_of_models=len(symbol_sequences),
                           export_fitness_fnc_results=EXPORT_FITNESS_RESULTS)

    # run model epochs
    manager.run(10)

    # show the probability distribution of the different sequences in the model
    pred_probs = manager.get_best_model_probility()

    return get_g_score(pred_probs, true_probs)
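Judging from the loader in Example no. 29, the inputs are lists of symbols plus a reference probability table; a hypothetical call with made-up data:

# Hypothetical invocation of genetic_magic; the sequences and reference
# probabilities below are placeholders, not data from the source.
symbol_sequences = [['a', 'b', 'b'], ['a', 'c'], ['b', 'c', 'a']]
true_probs = [0.5, 0.3, 0.2]  # reference distribution loaded elsewhere
score = genetic_magic(symbol_sequences, true_probs)
print(score)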
Example no. 7
def main():
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    model = FrontNet(PreActBlock, [1, 1, 1])
    ModelManager.Read('Models/FrontNetNicky.pt', model)

    DATA_PATH = "/Users/usi/PycharmProjects/data/"

    [x_test, y_test,
     z_test] = DataProcessor.ProcessTestData(DATA_PATH + "TestNicky.pickle",
                                             60, 108)
    test_set = Dataset(x_test, y_test)
    params = {'batch_size': 1, 'shuffle': False, 'num_workers': 0}
    test_generator = data.DataLoader(test_set, **params)
    trainer = ModelTrainer(model)

    valid_loss_x, valid_loss_y, valid_loss_z, valid_loss_phi, outputs, gt_labels = trainer.ValidateSingleEpoch(
        test_generator)

    VizDroneBEV(x_test, y_test, z_test, outputs)
Example no. 8
    def __init__(self):
        self.yd = YahooDataset()
        self.movieID_to_info = self.yd.loadMovies()
        self.fullTrainSet = self.yd.loadFullSet()

        self.models = ModelManager().getAllModels()
        self.fullTrainSet.rating_scale = (1, 13)
Example no. 9
    def __init__(self, v1_model_path, v2_model_path, v3_model_path):
        model = FrontNet(PreActBlock, [1, 1, 1])
        ModelManager.Read(v1_model_path, model)
        #state_dict = torch.load(v1_model_path, map_location='cpu')
        rospy.loginfo(v1_model_path)
        #model.load_state_dict(state_dict['model'])
        #model.load_state_dict(torch.load(v1_model_path, map_location='cpu'))
       
        self.trainer = ModelTrainer(model)
        model2 = FrontNet(PreActBlock, [1, 1, 1])
        #model2.load_state_dict(torch.load(v2_model_path, map_location='cpu'))
        ModelManager.Read(v2_model_path, model2)
        self.trainer2 = ModelTrainer(model2)


        self.pose_pub = rospy.Publisher("predicted_pose", PoseStamped, queue_size=1)
Example no. 10
def initGui():
    if not Builder.isTraining() and not Builder.isReply():
        return
    Builder.mm = ModelManager(Builder.getMapName())
    Builder.currentPath = Builder.pm.getNextPath()
    Builder.inBattle = True
    Builder.moving = False
    Builder.blockMove = False
Example no. 11
    def __init__(self, key):
        """Constructor for a client object
    """
        self.key = key
        self.dictionaryManager = DictionaryManager(key)
        self.entityManager = EntityManager(key)
        self.conceptManager = ConceptManager(key)
        self.modelManager = ModelManager(key)
        self.categoryManager = CategoryManager(key)

        # Default parameter values for /semantic_tagging
        self.fields = ''
        self.filter_data = 'y'

        # Default parameter values for /check
        self.mode = 'all'
        self.group_errors = '2'
        self.check_spacing = 'n'
Example no. 12
    def deleteModel(self, algo, model):
        path = "models/" + algo + "/" + model
        try:
            ModelManager().deleteModel(path)
        except OSError:
            return "error"

        self.models[algo].remove(model)

        return "success"
Example no. 14
def TestInference():

    #logging.disable(logging.INFO)
    frame = cv2.imread("../Deployment/dataset/87.pgm", 0)
    frame = np.reshape(frame, (60, 108, 1))
    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read("Models/DronetHimax108x60.pt", model)
    trainer = ModelTrainer(model)
    v1_pred = trainer.InferSingleSample(frame)
    print("output")
    print(v1_pred)
Example no. 16
def Test():
    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read("Models/DronetHimax160x90.pt", model)

    trainer = ModelTrainer(model)

    #ModelManager.Read("Models/FrontNetGray.pt", model)
    [x_test, y_test] = DataProcessor.ProcessTestData(
        "/Users/usi/PycharmProjects/data/160x90HimaxStatic_12_03_20.pickle")
    test_set = Dataset(x_test, y_test)

    params = {'batch_size': 64, 'shuffle': False, 'num_workers': 1}
    test_loader = data.DataLoader(test_set, **params)
    trainer.Predict(test_loader)
Example no. 18
def TestInference():

    frame = cv2.imread("test13.pgm", 0)
    frame = frame[92:152, 108:216]
    frame = np.reshape(frame, (60, 108, 1))
    #print(frame.flatten()[:10])
    #cv2.imshow("", frame)
    #cv2.waitKey()
    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read("Models/DronetGray.pt", model)
    # weight = model.conv.weight.data
    # weight = np.reshape(weight, (-1))
    # weight = weight[:10]
    # print(weight)
    trainer = ModelTrainer(model)
    v1_pred = trainer.InferSingleSample(frame)
    print("output")
    print(v1_pred)
Example no. 19
def main():
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read('../PyTorch/Models/DronetGray.pt', model)
    trainer = ModelTrainer(model)

    InferenceData(trainer)
Example no. 20
def main():
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)


    DATA_PATH = "/Users/usi/PycharmProjects/data/160x90/"

    # Get baseline results

    picklename = "160x90HimaxMixedTest_12_03_20.pickle"
    [x_test, y_test] = DataProcessor.ProcessTestData(DATA_PATH + picklename)
    test_set = Dataset(x_test, y_test)
    params = {'batch_size': 1, 'shuffle': False, 'num_workers': 1}
    test_generator = data.DataLoader(test_set, **params)
    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read('../PyTorch/Models/DronetHimax160x90AugCrop.pt', model)
    trainer = ModelTrainer(model)
    MSE2, MAE2, r2_score2, outputs2, gt_labels2 = trainer.Test(test_generator)

    # Get roll values

    picklename = "160x90HimaxMixedTest_12_03_20Rot.pickle"
    r_test = DataProcessor.GetRollFromTestData(DATA_PATH + picklename)

    print(r_test)


    if picklename.find(".pickle"):
        picklename = picklename.replace(".pickle", '')


    Plot2Models(r_test, picklename, r2_score2)
Example no. 21
def Filter():
    [x_test, y_test] = DataProcessor.ProcessTestData(
        "/Users/usi/PycharmProjects/data/test_vignette4.pickle")
    x_test2 = []
    y_test2 = []
    for i in range(len(x_test)):
        gt = y_test[i]
        if 1.0 < gt[0] < 2.0:
            x_test2.append(x_test[i])
            y_test2.append(y_test[i])

    x_test2 = np.asarray(x_test2)
    y_test2 = np.asarray(y_test2)
    test_set = Dataset(x_test2, y_test2)

    params = {'batch_size': 64, 'shuffle': False, 'num_workers': 0}
    test_loader = data.DataLoader(test_set, **params)
    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read("Models/DronetGray.pt", model)
    trainer = ModelTrainer(model)
    trainer.Predict(test_loader)
Example no. 22
def main():
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    DATA_PATH = "/Users/usi/PycharmProjects/data/160x90/"
    [x_test, y_test, z_test] = DataProcessor.ProcessTestData(
        DATA_PATH + "160x90HimaxMixedTest_12_03_20.pickle", True)

    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read('../PyTorch/Models/DronetHimax160x90Augmented.pt', model)
    trainer = ModelTrainer(model)

    VizHeatMapsByAngle(z_test, x_test, y_test, trainer)
Example no. 23
def main():
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read('../PyTorch/Models/DronetHimax160x90.pt', model)

    DATA_PATH = "/Users/usi/PycharmProjects/data/"
    picklename = "160x90HimaxDynamic_12_03_20Shift.pickle"
    [x_test, y_test, z_test] = DataProcessor.ProcessTestData(DATA_PATH + picklename, True)

    test_set = Dataset(x_test, y_test)
    params = {'batch_size': 1,
              'shuffle': False,
              'num_workers': 1}
    test_generator = data.DataLoader(test_set, **params)
    trainer = ModelTrainer(model)

    MSE, MAE, r2_score, outputs, gt_labels = trainer.Test(test_generator)

    h = x_test.shape[2]
    w = x_test.shape[3]
    x_test = np.reshape(x_test, (-1, h, w))

    if picklename.find(".pickle"):
        picklename = picklename.replace(".pickle", '')

    VizWorldTopView(x_test, y_test, z_test, outputs, True, picklename)
Example no. 24
#model definition
from ModelManager import ModelManager
if parameters.arch() == 'vgg16':
    model = models.vgg16(pretrained=True)
    input_nodes = 25088
elif parameters.arch() == 'densenet121':
    model = models.densenet121(pretrained=True)
    input_nodes = 1024
else:
    raise ValueError("Unsupported architecture: " + parameters.arch())

classifier = nn.Sequential(
    nn.Linear(input_nodes, parameters.hidden_units()), nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(parameters.hidden_units(),
              len(data_manager.get_images_data_training().class_to_idx)),
    nn.LogSoftmax(dim=1))
model_manager = ModelManager(parameters.gpu())
model_manager.set_model(model, classifier, parameters.learning_rate())

#model training
model_manager.train(
    parameters.epochs(), {
        'training': data_manager.get_dataloader_training(),
        'validation': data_manager.get_dataloader_validation()
    })
#model testing
accuracy = model_manager.test_accuracy(data_manager.get_dataloader_testing())

if accuracy >= 0.7:
    print("Accuracy at or above 70 perc. Good model")
else:
    print("Accuracy below 70 perc. Bad model")
Example no. 25
    def save_checkpoint(self, val_loss, model, epoch, file_name):
        '''Saves the model when the validation loss decreases.'''
        if self.verbose:
            logging.info("[EarlyStopping] Validation loss decreased {} --> {}. Saving model as {}".format(self.val_loss_min, val_loss, file_name))
        ModelManager.Write(model, epoch, file_name)
        self.val_loss_min = val_loss
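For context, `save_checkpoint` is typically invoked from an early-stopping check; a hypothetical caller (the `patience`, `counter`, and `early_stop` attributes are assumptions, not source code):

    # Hypothetical early-stopping check that would call save_checkpoint above;
    # the patience/counter/early_stop attributes are assumptions.
    def __call__(self, val_loss, model, epoch, file_name):
        if val_loss < self.val_loss_min:
            self.save_checkpoint(val_loss, model, epoch, file_name)
            self.counter = 0
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True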
Example no. 26
def main():
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    DATA_PATH = "/Users/usi/PycharmProjects/data/160x90/"
    [x_test, y_test, z_test] = DataProcessor.ProcessTestData(DATA_PATH + "160x90HimaxMixedTrain_12_03_20.pickle", True)
    model = Dronet(PreActBlock, [1, 1, 1], True)
    ModelManager.Read('../PyTorch/Models/DronetHimax160x90Mixed.pt', model)
    trainer = ModelTrainer(model)

    df = pd.DataFrame({'x': z_test[:, 0], 'y': z_test[:, 1], 'z': z_test[:,2], 'phi': z_test[:, 3]})
    walls = SortByAngle(df)

    fig, a = plt.subplots(2, 2, figsize=(9,9))

    for w in range(len(walls)):
        zw_set = z_test[walls[w]]
        xw_set = x_test[walls[w]]
        yw_set = y_test[walls[w]]
        df_w = pd.DataFrame({'x': zw_set[:, 0], 'y': zw_set[:, 1], 'z': zw_set[:, 2], 'phi': zw_set[:, 3]})

        zmin = np.amin(zw_set, axis=0)
        zmax = np.amax(zw_set, axis=0)

        xcells = SortByCoordinates(df_w, 'x')
        ycells = SortByCoordinates(df_w, 'y')

        valid_loss = []
        samples = []

        for i in range(len(xcells)):
            for j in range(len(ycells)):
                cell = intersection(xcells[i], ycells[j])
                if len(cell) > 0:
                    x = xw_set[cell]
                    y = yw_set[cell]
                    test_set = Dataset(x, y)
                    params = {'batch_size': 1, 'shuffle': False, 'num_workers': 0}
                    test_generator = data.DataLoader(test_set, **params)

                    MSE, MAE, r2_score, y_pred, gt_labels = trainer.Test(test_generator)
                    loss = MAE[1]
                else:
                    loss = 0
                valid_loss.append(loss)
                samples.append(len(cell))

        VizHeatMap(valid_loss, a, samples, len(xcells), len(ycells), w, zmin, zmax)

    fig.tight_layout()
    fig.suptitle('Y axis Error vs Pose')
    plt.savefig("yheatmap.png")
    plt.show()
Example no. 27
        self.recomHeavy = self.recommendedLevel(maxHeavy)
        message = "The minimum legal standard for light and heavy impact sound is 50 and 58dB. To satisfy this" \
                  "standard, technology with light impact reduction grade of " + str(self.recomLight) + "and heavy impact" \
                  "reduction grade of " + str(self.recomHeavy) + " is required. Do you want to add this to the searching condition?"
        self.messageUI.textBrowserMessage.setText(message)
        self.messageDialog.show()

    def recommendedLevel(self, intensity):
        if intensity < 20:
            return 4
        if intensity < 40:
            return 3
        if intensity < 60:
            return 2
        return 1

    def connectComboBoxSliderAndLabel(self, comboBox, slider, label):
        comboBox.currentIndexChanged.connect(
            lambda: self.initSlider(slider, comboBox, label))
        slider.sliderMoved.connect(
            lambda text=str(slider.value()): self.initLabel(label, text))


if __name__ == "__main__":
    data = DBManager()
    model = ModelManager(data, "techs")

    app = QApplication(sys.argv)
    interface = Interface(model)
    interface.show()
    sys.exit(app.exec_())
Example no. 28
class ModelIO(object):
    def __init__(self, *args, **kwargs):
        # Set up logging
        self.logger = logging.getLogger(__name__)

        super(ModelIO, self).__init__()
        self._subRefs = {}
        self._objRefs = {}
        self._modelRefs = {}

        self._modelName = kwargs.get('modelName')
        self._objectList = ['queue', 'thread', 'semaphore', 'event', 'subscribe']
        self._ModelConfig = ModelConfig(kwargs.get('filename'))
        self._modelManager = ModelManager()

        self._id = uuid.uuid1().int
        self._modelAttributes = self._ModelConfig.getModelAttributes(self.getModelName())
        self._OnSuccess = self.getModelAttributes().get('OnSuccess')
        self._OnFailure = self.getModelAttributes().get('OnFailure')
        self._model_type = self.getModelAttributes().get('type')
        self._model = ModelInit(modelId=self.getId(), *args, **kwargs,
                                **self._modelAttributes,
                                ports=self._ModelConfig.getModelInputPorts(self.getModelName()))
        self.setModelRefs()

        if self.getModelType() == 'connection':
            self._modelManager.setConnectionRefs(modelRefs=self.getModelRefs(),
                                                 connectionObject=kwargs.get('connectionObject'))
            self._modelManager.setConnectionModels(model=self.getModel(),
                                                   connectionObject=kwargs.get('connectionObject'))
        else:
            self._modelManager.setModelRefs(objRefs=self.getObjRefs(), modelName=self.getModelName())
            self._modelManager.setModels(model=self.getModel(), modelName=self.getModelName())

    def getModelType(self):
        return self._model_type

    def getModelAttributes(self):
        return self._modelAttributes

    def getId(self):
        return self._id

    def getModelName(self):
        return self._modelName

    def getModel(self):
        return self._model

    def getOnSuccess(self):
        return self._OnSuccess

    def getOnFailure(self):
        return self._OnFailure

    #def formDict(self,key,value):
    #    return dict(zip([key],[value]))

    def setModelRefsForObjects(self, obj):
        if obj == 'queue':
            self._subRefs['subref'] = self.getModel().getQ().getSubRefs()
            self._subRefs['objectid'] = self.getModel().getQ().getId()
        elif obj == 'thread':
            self._subRefs['subref'] = self.getModel().getThread().getSubRefs()
            self._subRefs['objectid'] = self.getModel().getThread().getId()
        elif obj == 'semaphore':
            self._subRefs['subref'] = self.getModel().getSemaphore().getSubRefs()
            self._subRefs['objectid'] = self.getModel().getSemaphore().getId()
        elif obj == 'event':
            self._subRefs['subref'] = self.getModel().getEvent().getSubRefs()
            self._subRefs['objectid'] = self.getModel().getEvent().getId()
        elif obj == 'subscribe':
            self._subRefs['subref'] = self.getModel().getSubscribe().getSubRefs()
            self._subRefs['objectid'] = self.getModel().getSubscribe().getId()
        else:
            return

        self._objRefs['modelId'] = self.getId()

        if self.getOnSuccess():
            self._objRefs['OnSuccess'] = list(self.getOnSuccess().split(sep=','))
        else:
            self._objRefs['OnSuccess'] = []

        if self.getOnFailure():
            self._objRefs['OnFailure'] = list(self.getOnFailure().split(sep=','))
        else:
            self._objRefs['OnFailure'] = []

        self._objRefs['model_type'] = self.getModelType()
        self._objRefs[obj] = copy.deepcopy(self._subRefs)
        self._modelRefs[self.getModelName()] = copy.deepcopy(self._objRefs)

    def getObjectList(self):
        return self._objectList

    def setModelRefs(self):
        return list(map(self.setModelRefsForObjects, self.getObjectList()))

    def getObjRefs(self):
        return self._objRefs

    def getModelRefs(self):
        return self._modelRefs
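A hypothetical instantiation, using only the kwargs the constructor actually reads (the config file name is a placeholder):

# Hypothetical usage; 'models.yaml' is a placeholder configuration file name.
io = ModelIO(modelName='reader', filename='models.yaml')
print(io.getModelRefs())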
Example no. 29
sequence_file_path = "./data/generated_data/" + sys.argv[1]
with open(sequence_file_path) as file:
    lines = file.readlines()
    for line in lines:
        symbols = line.strip().split(",")

        # check that the reserved start/end symbols are not used
        assert 'x' not in symbols and 'o' not in symbols

        symbol_sequences.append(symbols)


print(f"[{len(symbol_sequences)} sequences loaded]")

# create a model manager to estimate a model from the symbol sequences
manager = ModelManager(symbol_sequences, 10)

# run model epochs
manager.run(10)

# show the probability distribution of the different sequences in the model
pred_probs = manager.get_best_model_probility()
# print(pred_probs)


with open("./data/MODEL_DEFINTION.txt") as file:
    lines = file.readlines()
    true_probs = []
    for line in lines:
        line = line.strip()
        [trace, prob] = line.split(" ")
Example no. 30
import warnings
warnings.filterwarnings("ignore")
from PIL import Image
#torch
import torch
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
from torch import nn
#parameters Loading
from AppParametersLoader import AppParametersLoader
parameters = AppParametersLoader()
parameters.print_all()
#Data Loading
from DataManager import DataManager
data_manager = DataManager()
#previous saved model loading
from ModelManager import ModelManager
chk_path = parameters.save_dir() + '/checkpoint.pth'
model_manager = ModelManager(parameters.gpu())
model_manager.load_model(chk_path)
#prediction
image_to_predict = data_manager.process_image(parameters.image_path())
top_ps, top_classes = model_manager.predict(image_to_predict, topk=parameters.top_k())
#Getting categories names by class labels
cat_to_name = data_manager.get_cat_to_name(parameters.category_names_path())
category_names = []
for i in top_classes:
    category_names += [cat_to_name[i]]
print(f"Predicition: probabilities: {top_ps} ")
print(f"Prediction: classes: {category_names} ")
Example no. 31
    def recommend(self, params):
        user = self.fullTrainSet.to_inner_uid(params["user"])

        antiTestSet = self._buildAntiTestSetForUser(user)

        algo = params["algorithm"]
        path = "models/" + algo

        if algo == "svd":
            if "models" not in params.keys():
                args = {
                    "random_state": 0,
                    "reg_all": float(params["rr"]),
                    "lr_all": float(params["lr"]),
                    "n_epochs": int(params["ne"]),
                    "n_factors": int(params["factors"])
                }

                svd = SVD(**args)
                svd = svd.fit(self.fullTrainSet)
                predictions = svd.test(antiTestSet)

                if "name" in params.keys():
                    mm = ModelManager()
                    name = params["name"]
                    path = path + "/" + name
                    mm.saveModel(svd, path)
                    self.models[algo].append(name)
            else:
                mm = ModelManager()
                model = params["models"]
                path = path + "/" + model

                svd, _ = mm.loadModel(path)
                predictions = svd.test(antiTestSet)

            topN = self._getTopNForUser(predictions)
            topN = [(self.getAdditionalData(movieId), int(round(estimated, 0))) for movieId, estimated in topN]

        elif algo == "knnItemBaseline":

            if "models" not in params.keys():
                args = {
                    "sim_options" : {'name': 'cosine', 'user_based': False},
                    "k": int(params["k"])
                }

                knn = KNNBaseline(**args)
                knn = knn.fit(self.fullTrainSet)
                predictions = knn.test(antiTestSet)

                if "name" in params.keys():
                    mm = ModelManager()
                    name = params["name"]
                    path = path + "/" + name
                    mm.saveModel(knn, path)
                    self.models[algo].append(name)
            else:
                mm = ModelManager()
                model = params["models"]
                path = path + "/" + model

                knn, _ = mm.loadModel(path)
                predictions = knn.test(antiTestSet)

            topN = self._getTopNForUser(predictions)
            topN = [(self.getAdditionalData(movieId), int(round(estimated, 0))) for movieId, estimated in topN]

        elif algo == "weightedHybrid":

            svd = SVD(random_state=0, reg_all=0.1, lr_all=0.003, n_factors=30, verbose=False)
            knn = KNNBaseline(sim_options={'name': 'cosine', 'user_based': False}, k=150)
            weightedHybrid = WeightedHybridAlgorithm(svd, knn, weights=[0.6, 0.4])
            weightedHybrid.fit(self.fullTrainSet)
            predictions = weightedHybrid.test(antiTestSet)
            topN = self._getTopNForUser(predictions)
            topN = [(self.getAdditionalData(movieId), int(round(estimated, 0))) for movieId, estimated in topN]

        elif algo == "userCollaborative":

            if "models" not in params.keys():
                args = {
                    "k": int(params["k"]),
                    "sim_options": {'name': 'cosine', 'user_based': True}
                }

                knn = knnRecAlgorithm(**args)
                knn = knn.fit(self.fullTrainSet)
                predictions = knn.test(antiTestSet)

                if "name" in params.keys():
                    mm = ModelManager()
                    name = params["name"]
                    path = path + "/" + name
                    mm.saveModel(knn, path)
                    self.models[algo].append(name)
            else:
                mm = ModelManager()
                model = params["models"]
                path = path + "/" + model

                knn, _ = mm.loadModel(path)
                predictions = knn.test(antiTestSet)

            topN = self._getTopNForUser(predictions, minimumRating=0.0)
            # topN = [(self.getAdditionalData(movieId), round(estimated, 2)) for movieId, estimated in topN]
            topN = [(self.getAdditionalData(movieId), "") for movieId, estimated in topN]

        elif algo == "bpr":

            if "models" not in params.keys():

                args = {
                    "reg": float(params["rr"]),
                    'learning_rate': float(params["lr"]),
                    'n_iters': int(params["ni"]),
                    'n_factors': int(params["factors"]),
                    'batch_size': 100
                }

                bpr = BPRecommender(args)
                bpr = bpr.fit()

                if "name" in params.keys():
                    mm = ModelManager()
                    name = params["name"]
                    path = path + "/" + name
                    mm.saveBprModel(bpr, path)
                    self.models[algo].append(name)
            else:
                mm = ModelManager()
                model = params["models"]
                path = path + "/" + model
                bpr = mm.loadBprModel(path)

            topN = bpr.recommend(user)

            topN = [(self.getAdditionalData(movieId), "") for movieId in topN]

        return topN
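The same fit-then-save-or-load dance is repeated for each algorithm above; a sketch of a helper that factors it out (a suggested refactor under the same ModelManager API, not code from the original):

    # Sketch of a helper factoring out the repeated fit/save-or-load pattern
    # above; a suggested refactor, not original code.
    def _fitOrLoad(self, algo, params, basePath, factory):
        mm = ModelManager()
        if "models" not in params:
            model = factory().fit(self.fullTrainSet)
            if "name" in params:
                mm.saveModel(model, basePath + "/" + params["name"])
                self.models[algo].append(params["name"])
            return model
        model, _ = mm.loadModel(basePath + "/" + params["models"])
        return model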
Example no. 33
import one
import random
from ModelManager import ModelManager
def print_cache():
    three = one.One()
    result = []
    for i in range(1000):
        result.append(three.get_a(random.randint(0, 1000)))
    print(len(result))
    print(three.get_a.cache_info())

def print_nocache():
    three = one.One_nocache()
    print(three.get_a())

manager = ModelManager()
print(manager.getModelRefs())
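`print_cache` calls `three.get_a.cache_info()`, which implies `One.get_a` is wrapped with `functools.lru_cache`; a minimal sketch of the `one` module under that assumption:

# Minimal sketch of the `one` module assumed above; lru_cache is inferred from
# the cache_info() call, and the computations are placeholders.
import functools

class One:
    @functools.lru_cache(maxsize=None)
    def get_a(self, n):
        return n * n  # placeholder for the real work

class One_nocache:
    def get_a(self, n=0):
        return n * n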
Example no. 34
"""
Created on Sun Dec 13 20:52:09 2020

@author: Dimo
"""
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt

from resnet18 import Resnet18
from DS import LoadData
from ModelManager import ModelManager

if __name__ == "__main__":
    manager = ModelManager(ModelRoot='./model')

    #train for the known model
    manager.train_known(1,30,40)
    manager.validate()
    for i in range(3):
        #start unknown part
        print('Calculate sample probability...')
        probs = manager.predict_probability(10000)  # search budget
        # per-sample sum of p*log(p), i.e. the negative Shannon entropy
        logprob = torch.log(probs[1])
        E = logprob * probs[1]
        E = E.sum(1)
        probs1 = list(zip(probs[0], E))
        res = sorted(probs1, key=lambda x: x[1])
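The score `E` above is the per-sample sum of p·log p, i.e. the negative Shannon entropy, so the ascending sort ranks the most uncertain samples first. A standalone version of that scoring (a sketch, not from the source):

import torch

def negative_entropy(probs):
    # probs: (N, C) class probabilities per sample; returns (N,) scores where
    # more negative means higher entropy, i.e. more model uncertainty.
    return (probs * torch.log(probs)).sum(dim=1)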
Example no. 35
class PersistencyManager(object):
    def __init__(self):
        self._entity = EntityManager()
        self._model = ModelManager()

    def addModel(self, fileName):
        # TODO > not yet implemented
        if self._model._checkModelCorrectness(fileName):
            name = self._model.addModel(fileName)
            if name is None:
                return
            for entity in self._model._model[name].entities():
                self._entity.addEntity(entity, self._model._model[name][entity]['_signature'])

    def removeModel(self, modelName):
        self._model.removeModel(modelName)

    def add(self, model, entity):
        if model not in self._model._model.keys():
            return
        wip = self._entity.add(entity)
        self._model.add(model, entity, wip)
        return wip

    def remove(self, model, entity, id):
        if model not in self._model._model.keys():
            return
        self._entity.remove(entity, id)
        self._model.remove(model, entity, id)

    def update(self, model, entity, id, field, val):
        if not self._model.exist(model, entity):
            return
        self._entity.update(entity, id, field, val)

    def ref(self, model, entity, id, field, val):
        if not (self._model.exist(model, entity, id) and self._entity.exist(entity)):
            return
        self._model.ref(model, entity, id, field, val)

    def deref(self, model, entity, id, field, val):
        self._model.deref(model, entity, id, field, val)

    def get(self, model, entity, id, field):
        if self._model.fieldInfo(model, entity, field) is not None:
            return self._model.get(model, entity, id, field)
        elif self._entity.hasField(entity, field):
            return self._entity.get(entity, id, field)
        else:
            return None

    def getFieldInfo(self, model, entity, id, field):
        tmp = self._model.fieldInfo(model, entity, field)
        if tmp is None:
            tmp = []
        return tmp

    def getInfo(self, model=None, entity=None, id=None, field=None):
        if model is None:
            return self._model.getInfo()
        if entity is None:
            return self._model.getInfo(model)
        if id is None:
            return self._entity.getInfo(entity)
        if field is None:
            ret = self._model.getInfo(model, entity)
            ret += [x[0] for x in self._model.entitySignature(model, entity)]
            ret.remove('_signature')
            return ret
        return self.get(model, entity, id, field)
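A hypothetical flow through the facade above (the model, entity, and field names are placeholders):

# Hypothetical usage of PersistencyManager; all names below are placeholders.
pm = PersistencyManager()
pm.addModel('library.model')         # register a model file and its entities
book_id = pm.add('library', 'book')  # create a 'book' entity in the model
pm.update('library', 'book', book_id, 'title', 'Dune')
print(pm.getInfo('library', 'book'))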
Example no. 37
class SemPubClient:
    def __init__(self, key):
        """Constructor for a client object."""
        self.key = key
        self.dictionaryManager = DictionaryManager(key)
        self.entityManager = EntityManager(key)
        self.conceptManager = ConceptManager(key)
        self.modelManager = ModelManager(key)
        self.categoryManager = CategoryManager(key)

        # Default parameter values for /semantic_tagging
        self.fields = ''
        self.filter_data = 'y'

        # Default parameter values for /check
        self.mode = 'all'
        self.group_errors = '2'
        self.check_spacing = 'n'

    # Setters for configuration parameters
    def setAnalysisFields(self, fields):
        self.fields = fields

    def setAnalysisFilterData(self, filter_data):
        self.filter_data = filter_data

    def setCheckMode(self, mode):
        self.mode = mode

    def setCheckGroupErrors(self, group_errors):
        self.group_errors = group_errors

    def setCheckSpacing(self, check_spacing):
        self.check_spacing = check_spacing

    def __parseResponse(self, response):
        """
      Helper method that parses the result or throws an Exception if the service returns an error
    """

        if response.status_code == requests.codes.ok:
            r = response.json()
            return r['result']
        else:
            r = response.json()
            raise SemPubException(response.status_code, r['status'])

        # Semantic tagging service operations
    def analyzeDocument(self, document, dictionary=None, models=None):
        """ Returns the text of the document analyzed including all the extracted semantic information. 
      It takes into account document metadata (language, source, timeref) to build a more accurate analysis
      
      :param document: :class:'Document' to be analyzed 
      :param dictionary (optional): a user defined :class:'Dictionary' to include for tagging 
      :param models (optional): a list of user defined :class:'Model' to include for classification
    """
        payload = {
            'key': self.key,
            'doc': str(document),
            'filter_data': self.filter_data,
            'fields': self.fields
        }
        if (dictionary is not None):
            payload['dictionary'] = dictionary['name']
        if (models is not None):
            if (isinstance(models, list)):
                modelnames = map(lambda x: x['name'], models)
                payload['model'] = modelnames
            else:
                payload['model'] = models['name']
        payload['src'] = 'sdk-python-1.0'
        endpoint = TAGGING_SERVICE_ENDPOINT
        response = requests.post(endpoint, data=payload)
        return self.__parseResponse(response)

    def analyzeText(self, text, lang, dictionary=None, models=None):
        """ Returns the text analyzed including all the extracted semantic information
      
      :param text: text to be analyzed 
      :param lang: language of the text 
      :param dictionary (optional): a user defined :class:'Dictionary' to include for tagging 
      :param models (optional): a list of user defined :class:'Model' to include for classification
    """
        doc = Document(1, text)
        doc['language'] = lang
        return self.analyzeDocument(doc, dictionary, models)

    # Text proofreading service operations
    def checkDocument(self, document, doc_offset=0, dictionary=None):
        """ Returns the proofreading issues found in the document text 
      It takes into account document metadata (language)
      
      :param document: :class:'Document' to be analyzed 
      :param doc_offset: offset in characters from where to start proofreading 
      :param dictionary (optional): a user defined :class:'Dictionary' that marks words in our dictionary as known 
    """
        payload = {
            'key': self.key,
            'doc': str(document),
            'doc_offset': doc_offset,
            'mode': self.mode,
            'group_errors': self.group_errors,
            'check_spacing': self.check_spacing
        }
        if (dictionary is not None):
            payload['dictionary'] = dictionary['name']
        payload['src'] = 'sdk-python-1.0'
        endpoint = CHECK_SERVICE_ENDPOINT
        response = requests.post(endpoint, data=payload)
        return self.__parseResponse(response)

    def checkText(self, text, lang, doc_offset=0, dictionary=None):
        """ Returns the proofreading issues found in the text
      
      :param text: text to be analyzed 
      :param lang: language of the text
      :param doc_offset: offset in characters from where to start proofreading 
      :param dictionary (optional): a user defined :class:'Dictionary'that marks words in our dictionary as known 
    """
        doc = Document(1, text)
        doc['language'] = lang
        return self.checkDocument(doc, doc_offset, dictionary)

    # CRUD operations on Dictionary
    def getDictionaryList(self, query, lang):
        """ List of user-defined dictionaries

      :param query: regular expression to filter dictionaries
      :param lang: filter dictionaries in this language; use 'all' for a multilingual dictionary
    """
        return self.dictionaryManager.getList(query, lang)

    def createDictionary(self, dictionary):
        return self.dictionaryManager.create(dictionary)

    def readDictionary(self, name):
        return self.dictionaryManager.read(name)

    def updateDictionary(self, dictionary):
        return self.dictionaryManager.update(dictionary)

    def deleteDictionary(self, dictionary):
        # accepts either a dictionary name or a Dictionary object
        if isinstance(dictionary, str):
            return self.dictionaryManager.delete(dictionary)
        return self.dictionaryManager.delete(dictionary.getId())

    # CRUD operations on Entity
    def getEntityList(self, dictionary, query):
        """ Shows a list of entities (:class:'Entity') included in the dictionary matching the query  

      :param dictionary: a :class:'Dictionary' object 
      :param query: a regular expression
    """
        return self.entityManager.getList(dictionary.getId(), query)

    def createEntity(self, entity, dictionary):
        return self.entityManager.create(entity, dictionary.getId())

    def readEntity(self, id, dictionary):
        return self.entityManager.read(id, dictionary.getId())

    def updateEntity(self, entity, dictionary):
        return self.entityManager.update(entity, dictionary.getId())

    def deleteEntity(self, entity, dictionary):
        # accepts either an entity id or an Entity object
        entity_id = entity.getId() if hasattr(entity, 'getId') else entity
        return self.entityManager.delete(entity_id, dictionary.getId())

        # CRUD operations on Concept
    def getConceptList(self, dictionary, query):
        """ Shows a list of concepts (:class:'Concept') included in the dictionary matching the query  

      :param dictionary: a :class:'Dictionary' object 
      :param query: a regular expression
    """
        return self.conceptManager.getList(dictionary.getId(), query)

    def createConcept(self, concept, dictionary):
        return self.conceptManager.create(concept, dictionary.getId())

    def readConcept(self, id, dictionary):
        return self.conceptManager.read(id, dictionary.getId())

    def updateConcept(self, concept, dictionary):
        return self.conceptManager.update(concept, dictionary.getId())

    def deleteConcept(self, concept, dictionary):
        # accepts either a concept id or a Concept object
        concept_id = concept.getId() if hasattr(concept, 'getId') else concept
        return self.conceptManager.delete(concept_id, dictionary.getId())

        # CRUD operations on Model
    def getModelList(self, query, lang):
        """ List of user-defined models

      :param query: regular expression to filter models
      :param lang: filter models in this language
    """
        return self.modelManager.getList(query, lang)

    def createModel(self, model):
        return self.modelManager.create(model)

    def readModel(self, name):
        return self.modelManager.read(name)

    def updateModel(self, model):
        return self.modelManager.update(model)

    def deleteModel(self, model):
        # accepts either a model name or a Model object
        if isinstance(model, str):
            return self.modelManager.delete(model)
        return self.modelManager.delete(model.getId())

    # CRUD operation on Category
    def getCategoryList(self, model, query):
        """ Show a list of categories (:class:'Category') included in the model matching the query  

      :param model: a :class:'Model' object 
      :param query: a regular expression
    """
        return self.categoryManager.getList(model.getId(), query)

    def createCategory(self, category, model):
        return self.categoryManager.create(category, model.getId())

    def readCategory(self, id, model):
        return self.categoryManager.read(id, model.getId())

    def updateCategory(self, category, model):
        return self.categoryManager.update(category, model.getId())

    def deleteCategory(self, category, model):
        # accepts either a category id or a Category object
        category_id = category.getId() if hasattr(category, 'getId') else category
        return self.categoryManager.delete(category_id, model.getId())
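A hypothetical end-to-end use of the client (the API key and texts are placeholders):

# Hypothetical usage; the API key and texts are placeholders.
client = SemPubClient('YOUR_API_KEY')
analysis = client.analyzeText('Madrid is the capital of Spain.', 'en')
issues = client.checkText('Ths sentence has a typo.', 'en')
print(analysis, issues)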
Example no. 38
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch FrontNet')
    args = Parse(parser)

    torch.manual_seed(args.seed)

    # [NeMO] Setup of console logging.
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log.txt",
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    train_loader, validation_loader = LoadData(args)

    # [NeMO] Loading of the JSON regime file.
    regime = {}
    if args.regime is None:
        raise ValueError("Missing regime JSON.")
    else:
        with open(args.regime, "r") as f:
            rr = json.load(f)
        for k in rr.keys():
            try:
                regime[int(k)] = rr[k]
            except ValueError:
                regime[k] = rr[k]

    model = HannaNet(ConvBlock, [1, 1, 1], True)

    h = 96
    w = 160

    if args.trainq:
        epoch = ModelManager.Read(args.load_model, model)
        trainer = ModelTrainer(model, args, regime, teacher=True)
        trainer.TrainQuantized(train_loader, validation_loader, h, w,
                               args.epochs)

    if args.quantize and not args.trainq:
        model = nemo.transform.quantize_pact(model,
                                             dummy_input=torch.ones(
                                                 (1, 1, h, w)))
        logging.info("[ETHQ2] Model: %s", model)
        epoch, prec_dict = ModelManager.ReadQ(args.load_model, model)
        trainer = ModelTrainer(model, args, regime)
        trainer.Deploy(validation_loader, h, w, prec_dict)

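    # Note: trainer is only bound inside the trainq/quantize branches above,
    # so saving assumes one of them ran.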
    if args.save_model is not None:
        # torch.save(trainer.model.state_dict(), args.save_model)
        ModelManager.Write(trainer.GetModel(), 100, args.save_model)

    print(model)
class SemPubClient:

	def __init__(self,key):
		""" Constructor for a client object """
		self.key = key
		self.dictionaryManager = DictionaryManager(key)
		self.entityManager      = EntityManager(key)
		self.conceptManager     = ConceptManager(key)
		self.modelManager       = ModelManager(key)
		self.categoryManager    = CategoryManager(key)

		# Default parameter values for /semantic_tagging 
		self.fields = ''
		self.filter_data = 'y'

		# Default parameter values for /check 
		self.mode = 'all'
		self.group_errors = '2'
		self.check_spacing = 'n' 

	# Setters for configuration parameters
	def setAnalysisFields(self, fields):
		self.fields = fields

	def setAnalysisFilterData(self, filter_data):
		self.filter_data = filter_data

	def setCheckMode(self, mode):
		self.mode = mode

	def setCheckGroupErrors(self, group_errors):
		self.group_errors = group_errors

	def setCheckSpacing(self, check_spacing):
		self.check_spacing = check_spacing

	def __parseResponse(self, response):
		"""Helper methos that parses the result ot throws an Exception if the service returns an error"""
		if response.status_code == requests.codes.ok:
			r = response.json()
			return r['result']
		else: 
			r = response.json()
			raise SemPubException(response.status_code, r['status'])

	# Semantic tagging services
	def analyzeDocument(self, document, dictionary=None, models=None):
		""" returns the text of the document analyzed including all the extracted semantic information. 
			It takes into account document metadata (language, source, timeref) to build more accurate analysis
			
			:param document: :class:'Document' to be analyzed 
			:param dictionary (optional): a user defined :class:'Dictionary' to include for tagging 
			:param models (optional): a list of user defined :class:'Model: to include for classification
		"""
		payload = {
			'key': self.key, 
			'doc': str(document), 
			'filter_data' : self.filter_data, 
			'fields' : self.fields }
		if (dictionary is not None):
		 	payload['dictionary'] = dictionary['name']		
		if (models is not None):
			if (isinstance(models,list)):
				modelnames = map(lambda x: x['name'], models)
				payload['model'] = modelnames
			else:
				payload['model'] = models['name']			
		endpoint = TAGGING_SERVICE_ENDPOINT
		response = requests.post(endpoint, data = payload)
		return self.__parseResponse(response)
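	# Illustrative call (client, doc, my_dict and my_model are hypothetical):
	#   tagged = client.analyzeDocument(doc, dictionary=my_dict, models=[my_model])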

	def analyzeText(self, text, lang, dictionary=None, models=None):
		""" returns the text analyzed including all the extracted semantic information
			
			:param text: text to be analyzed 
			:param lang: language of the text 
			:param dictionary (optional): a user defined :class:'Dictionary' to include for tagging 
			:param models (optional): a list of user defined :class:'Model: to include for classification
		"""
		doc = Document(1, text)
		doc['language'] = lang;
		return self.analyzeDocument(doc, dictionary, models);

	# Text proofreading services
	def checkDocument(self, document, doc_offset = 0, dictionary = None):
		""" returns the proofreading issues found in the document text 
			It takes into account document metadata (language)
			
			:param document: :class:'Document' to be analyzed 
			:param doc_offset: offset in characters from where to start proofreading 
			:param dictionary (optional): a user defined :class:'Dictionary' that marks words in our dictionary as known 
		"""
		payload = {
		'key': self.key, 
		'doc': str(document) , 
		'doc_offset' : doc_offset, 
		'mode' : self.mode, 
		'group_errors' : self.group_errors, 
		'check_spacing' : self.check_spacing }
		if (dictionary is not None):
			payload['dictionary'] = dictionary['name']						
		endpoint = CHECK_SERVICE_ENDPOINT
		response = requests.post(endpoint, data = payload)
		return self.__parseResponse(response)
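	# Illustrative call (client and doc are hypothetical):
	#   issues = client.checkDocument(doc, doc_offset=0)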

	def checkText(self, text, lang, doc_offset = 0, dictionary = None):
		""" returns the proofreading issues found in the text
			
			:param text: text to be analyzed 
			:param lang: language of the text
			:param doc_offset: offset in characters from where to start proofreading 
			:param dictionary (optional): a user defined :class:'Dictionary'that marks words in our dictionary as known 
		"""
		doc = Document(1, text)
		doc['language'] = lang;
		return self.checkDocument(doc, doc_offset, dictionary);

	# CRUD operations on Dictionary
	def getDictionaryList(self, query, lang):
		""" List of use-defined dictionaries

			:param query: regular expresion to filter dictionaries
			:param lang: filter dictionaries in this language, use 'all' if a multilingual dictionary  
		""" 
		return self.dictionaryManager.getList(query, lang)

	def createDictionary(self, dictionary):
		return self.dictionaryManager.create(dictionary)

	def readDictionary(self, name):
		return self.dictionaryManager.read(name)

	def updateDictionary(self, dictionary):
		return self.dictionaryManager.update(dictionary)

	def deleteDictionary(self, dictionary):
		# Accept either a dictionary name or a :class:`Dictionary` object;
		# merged so one def does not silently shadow the other.
		name = dictionary.getId() if hasattr(dictionary, 'getId') else dictionary
		return self.dictionaryManager.delete(name)

	# CRUD operations on Entity
	def getEntityList(self, dictionary, query):
		""" Show a list of entities (:class:'Entity') included in the dictionary matching the query  

			:param dictionary: a :class:'Dictionary' object 
			:param query: a regular expression
		"""
		return self.entityManager.getList(dictionary.getId(), query)

	def createEntity(self, entity, dictionary):
		return self.entityManager.create(entity, dictionary.getId())

	def readEntity(self, id, dictionary):
		return self.entityManager.read(id, dictionary.getId())

	def updateEntity(self, entity, dictionary):
		return self.entityManager.update(entity, dictionary.getId())

	def deleteEntity(self, entity, dictionary):
		# Accept either an entity id or an :class:`Entity` object; merged so
		# one def does not silently shadow the other.
		entity_id = entity.getId() if hasattr(entity, 'getId') else entity
		return self.entityManager.delete(entity_id, dictionary.getId())

	# CRUD operations on Concept
	def getConceptList(self, dictionary, query):
		""" Show a list of concepts (:class:'Concept') included in the dictionary matching the query  

			:param dictionary: a :class:'Dictionary' object 
			:param query: a regular expression
		"""
		return self.conceptManager.getList(dictionary.getId(), query)

	def createConcept(self, concept, dictionary):
		return self.conceptManager.create(concept, dictionary.getId())

	def readConcept(self, id, dictionary):
		return self.conceptManager.read(id, dictionary.getId())

	def updateConcept(self, concept, dictionary):
		return self.conceptManager.update(concept, dictionary.getId())

	def deleteConcept(self, concept, dictionary):
		# Accept either a concept id or a :class:`Concept` object; merged so
		# one def does not silently shadow the other.
		concept_id = concept.getId() if hasattr(concept, 'getId') else concept
		return self.conceptManager.delete(concept_id, dictionary.getId())


	# CRUD operations on Model
	def getModelList(self, query, lang):
		""" List of user-defined models

			:param query: regular expresion to filter dictionaries
			:param lang: filter dictionaries in this language 
		""" 
		return self.modelManager.getList(query, lang)

	def createModel(self, model):
		return self.modelManager.create(model)

	def readModel(self, name):
		return self.modelManager.read(name)

	def updateModel(self, model):
		return self.modelManager.update(model)

	def deleteModel(self, model):
		# Accept either a model name or a :class:`Model` object; merged so one
		# def does not silently shadow the other.
		name = model.getId() if hasattr(model, 'getId') else model
		return self.modelManager.delete(name)

	# CRUD operations on Category
	def getCategoryList(self, model, query):
		""" Show a list of categories (:class:'Category') included in the model matching the query  

			:param model: a :class:'Model' object 
			:param query: a regular expression
		"""
		return self.categoryManager.getList(model.getId(), query)

	def createCategory(self, category, model):
		return self.categoryManager.create(category, model.getId())

	def readCategory(self, id, model):
		return self.categoryManager.read(id, model.getId())

	def updateCategory(self, category, model):
		return self.categoryManager.update(category, model.getId())

	def deleteCategory(self, category, model):
		# Accept either a category id or a :class:`Category` object; merged so
		# one def does not silently shadow the other.
		category_id = category.getId() if hasattr(category, 'getId') else category
		return self.categoryManager.delete(category_id, model.getId())
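
A minimal usage sketch of the client above; the key and texts are placeholders, and it assumes the manager getList calls return iterable results:

# Hypothetical usage of SemPubClient (key and texts are placeholders)
client = SemPubClient('YOUR_API_KEY')

# Semantic tagging of raw text in a given language
tagged = client.analyzeText('Madrid is the capital of Spain.', 'en')

# Proofreading with the default mode / group_errors / check_spacing settings
issues = client.checkText('Thiss sentense has erors.', 'en')

# List user-defined dictionaries whose name matches a regular expression
for d in client.getDictionaryList('.*', 'en'):
	print(d)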