Example no. 1
def getModel(
    config_file
):  # No need to specify last_layer as we don't need to do backward pass
    with open(config_file, 'r') as f:
        config = f.readlines()
    Whh = torch.load(config[-5].strip())
    Wxh = torch.load(config[-4].strip())
    Why = torch.load(config[-3].strip())
    Bhh = torch.load(config[-2].strip())
    Bhy = torch.load(config[-1].strip())

    model = Model()
    il = 0
    for desc in config[1:-5]:
        desc = desc.split()
        if desc[0] == 'rnn':
            in_features, hidden_features, out_features = int(desc[1]), int(
                desc[2]), int(desc[3])
            layer = RNN(in_features, hidden_features, out_features)
            layer.Whh = torch.Tensor(Whh[il])
            layer.Wxh = torch.Tensor(Wxh[il])
            layer.Why = torch.Tensor(Why[il])
            layer.Bhh = torch.Tensor(Bhh[il]).view(hidden_features, 1)
            layer.Bhy = torch.Tensor(Bhy[il]).view(out_features, 1)
            il += 1
        else:
            print(desc[0] + ' layer not implemented!')
            continue  # skip addLayer: `layer` would be stale or undefined here
        model.addLayer(layer)

    return model
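A minimal usage sketch for this loader, assuming hypothetical file names and layer sizes: the first config line is skipped, and the last five lines must point at files saved with torch.save, each holding one weight array per RNN layer.

with open('rnn_config.txt', 'w') as f:
    f.write('header\n')         # ignored: the loop reads config[1:-5]
    f.write('rnn 10 20 10\n')   # in_features hidden_features out_features
    for path in ['whh.pt', 'wxh.pt', 'why.pt', 'bhh.pt', 'bhy.pt']:
        f.write(path + '\n')    # Whh, Wxh, Why, Bhh, Bhy, in this order
model = getModel('rnn_config.txt')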
Example no. 2
def analyzeTranslationInvariance():
    # setup model
    model = Model(open(Constants.fnCharList).read(),
                  DecoderType.BestPath,
                  must_restore=True)

    # read image and specify ground-truth text
    img = cv2.imread(Constants.fnAnalyze, cv2.IMREAD_GRAYSCALE)
    (w, h) = img.shape
    assert Model.imgSize[1] == w

    imgList = []
    for dy in range(Model.imgSize[0] - h + 1):
        targetImg = np.ones((Model.imgSize[1], Model.imgSize[0])) * 255
        targetImg[:, dy:h + dy] = img
        imgList.append(preprocess(targetImg, Model.imgSize))

    # put images and gt texts into batch
    batch = Batch([Constants.gtText] * len(imgList), imgList)

    # compute probabilities
    (texts, probs) = model.infer_batch(batch,
                                       calc_probability=True,
                                       probability_of_gt=True)

    # save results to file
    with open(Constants.fnTranslationInvarianceTexts, 'wb') as f:
        pickle.dump(texts, f)
    np.save(Constants.fnTranslationInvariance, probs)
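The saved artifacts can be read back with the matching APIs (a sketch; it assumes Constants.fnTranslationInvariance already carries the .npy extension that np.save would otherwise append).

with open(Constants.fnTranslationInvarianceTexts, 'rb') as f:
    texts = pickle.load(f)                          # recognized texts per shift
probs = np.load(Constants.fnTranslationInvariance)  # ground-truth probabilities
for text, prob in zip(texts, probs):
    print(text, prob)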
Example no. 3
def getModel(ident):
    if ident == "SMD":
        print("Retrieved: spring-mass-damper system" +\
              "\n   p[0]   k/m" +\
              "\n   p[1]   d/m"+\
              "\n   p[2]   1/m\n")
        M = Model("Spring-Mass-Damper System",'ltiss',dt,[2,1,1,3],f_smd, A_smd, B_smd,\
                  F_smd, h_smd, C_smd, D_smd, H_smd)
        return M
    elif ident == "NLSMD":
        print("Retrieved: non-linear spring-mass-damper system" +\
              "\n   p[0]   k/m" +\
              "\n   p[1]   d/m"+\
              "\n   p[2]   1/m\n")
        M = Model("Non-linear Spring-Mass-Damper System",dt,[2,1,1,3],f_nlsmd, \
                  dfx_nlsmd, dfp_nlsmd, h_nlsmd, dhx_nlsmd, dhp_nlsmd)
        return M
    elif ident == "PEN":
        print("Retrieved: pendulum system" +\
              "\n   p[0]   g/l" +\
              "\n   p[1]   d/(ml^2)"+\
              "\n   p[2]   1/(ml^2)\n")
        M = Model("Pendulum without spring",dt,[2,1,1,3],f_pen, \
                  dfx_pen, dfp_pen, h_pen, dhx_pen, dhp_pen)
        return M
    else:
        print("Unknown system identifier. Options are: "+\
              "\n   SMD     spring-mass-damper system" +\
              "\n   NLSMD   Non-linear spring-mass-damper system" +\
              "\n   PEN     Pendulum\n" )
def getModel(config_file):
    with open(config_file,'r') as f:
        config = f.readlines()
    weights = torchfile.load(config[-2].strip())
    biases = torchfile.load(config[-1].strip())

    model = Model()
    il = 0
    for desc in config[1:-2]:
        desc = desc.split()
        if desc[0] == 'linear':
            in_features, out_features = int(desc[1]), int(desc[2])
            layer = Linear(in_features, out_features)
            layer.W = torch.Tensor(weights[il])
            layer.B = torch.Tensor(biases[il]).view(out_features, 1)
            il += 1
        elif desc[0] == 'relu':
            layer = ReLU()
        elif desc[0] == 'dropout':
            layer = Dropout(float(desc[1]), isTrain=False)
        else:
            print(desc[0] + ' layer not implemented!')
            continue  # skip addLayer: `layer` would be stale or undefined here
        model.addLayer(layer)

    return model
Example no. 5
    def test_normal_use(self):
        m = Model(3500, 100000)
        m.stepUp()
        self.assertEqual(m.getFrequency(), 3600)
        m.stepDown()
        self.assertEqual(m.getFrequency(), 3500)

        self.assertEqual(m.getFrequency(FrequencyUnit.HZ), 3500000)
        self.assertEqual(m.getFrequency(FrequencyUnit.MHZ), 3.5)
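A minimal Model and FrequencyUnit stub consistent with these assertions (an assumption, not the project's real implementation; the 100 kHz step is implied by the 3500 -> 3600 transition).

class FrequencyUnit:
    KHZ, HZ, MHZ = 'kHz', 'Hz', 'MHz'

class Model:
    STEP_KHZ = 100  # step size implied by the assertions above

    def __init__(self, freq_khz, max_khz):
        self.freq_khz = freq_khz
        self.max_khz = max_khz

    def stepUp(self):
        self.freq_khz = min(self.freq_khz + self.STEP_KHZ, self.max_khz)

    def stepDown(self):
        self.freq_khz -= self.STEP_KHZ

    def getFrequency(self, unit=FrequencyUnit.KHZ):
        if unit == FrequencyUnit.HZ:
            return self.freq_khz * 1000    # 3500 kHz -> 3500000 Hz
        if unit == FrequencyUnit.MHZ:
            return self.freq_khz / 1000    # 3500 kHz -> 3.5 MHz
        return self.freq_khz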
Example no. 6
 def __init__(self):
     
     self.inpt = {  # just the defaults to start with
         "wind": 3,
         "solar": 500,
         "albedo": 0.25,
         "airt": 15,
         "sfc": "grass (dry)",
         "rs": 40,
         "vp": 10,
         "smd": 10
     }
     self.rblist = [500.0, -30.0, 300.0, 200.0]
     self.eblist = [300.0, 100.0, 150.0, 200.0]
     # Polynomial fit to Graham Russell's smd fit for cereals
     # yfit = a*x**3+b*x+c   where x is smd in mm
     self.smdfit = [1.27791987e-04, -9.56796959e-02,  3.95338027e+01]
     self.mod = Model(self.inpt)
     self.tlist = self.mod.tlist
     self.vw = View(self.rblist, self.eblist, self.tlist)
     self.sfcs = ["grass (dry)","bare soil (dry)","cereals (dry)",
         "conifers (dry)","upland (dry)","grass (wet)", "bare soil (wet)",
         "cereals (wet)","conifers (wet)", "upland (wet)", "water"]
     self.bit_wind = widgets.BoundedIntText(value=self.inpt["wind"], min=1, max=15, step=1,
                                  description=r"u ($m \ s^{-1}$)", width=50)
     self.bit_solar = widgets.BoundedIntText(value=self.inpt["solar"], min=1, max=1000, step=10,
                                     description="solar ($W m^{-2}$)", width=50)
     self.bit_vp = widgets.BoundedIntText(value=self.inpt["vp"], min=1, max=40, step=1,
                                     description="vp (mbar)", width=50)
     self.dd_surface = widgets.Dropdown(value=self.inpt["sfc"], options=self.sfcs,
                                    description="surface", width=50)
     self.bit_smd = widgets.BoundedIntText(value=self.inpt["smd"], min=1, max=180, step=5,
                                    description="smd (mm)", width=50)
     self.bit_airt = widgets.BoundedIntText(value=self.inpt["airt"], min=-5, max=40, step=1,
                                    description="air T (oC)", width=50)
     self.txt_rs = widgets.Text(description="rs")
     self.txt_rh  = widgets.Text(description="RH (%)")                              
     self.txt_le = widgets.Text(description="LE")
     self.txt_ra = widgets.Text(description="ra")
     # First time round to populate output boxes
     self.rblist, self.eblist, self.tlist, self.olist = self.mod.calculateLE(self.inpt)
     self.txt_rs.value = '{0:.0f}'.format(self.olist[0])
     self.txt_rh.value = '{0:.1f}'.format(self.olist[1])
     self.txt_le.value = '{0:.1f}'.format(self.olist[2])
     self.txt_ra.value = '{0:.0f}'.format(self.olist[3])
     
     self.bit_wind.observe(self.bit_wind_eventhandler, names='value')
     self.bit_solar.observe(self.bit_solar_eventhandler, names='value')
     self.bit_vp.observe(self.bit_vp_eventhandler, names='value')
     self.dd_surface.observe(self.dd_surface_eventhandler, names='value')
     self.bit_smd.observe(self.bit_smd_eventhandler, names='value')
     self.bit_airt.observe(self.bit_airt_eventhandler, names='value')
     self.h0 = widgets.HBox(children=[self.dd_surface, self.bit_smd])
     self.h1 = widgets.HBox(children=[self.bit_solar, self.bit_wind])
     self.h2 = widgets.HBox(children=[self.bit_airt, self.bit_vp, self.txt_rh])
     self.h3 = widgets.HBox(children=[self.txt_ra, self.txt_rs, self.txt_le])
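The observer callbacks wired above are not part of this excerpt; a plausible shape for one of them, assuming the calculateLE contract shown earlier, is:

 def bit_wind_eventhandler(self, change):
     self.inpt["wind"] = change.new  # ipywidgets delivers the new value here
     self.rblist, self.eblist, self.tlist, self.olist = self.mod.calculateLE(self.inpt)
     self.txt_le.value = '{0:.1f}'.format(self.olist[2])

Example no. 7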
def main():
    "main function"
    # optional command line args
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', help='train the NN', action='store_true')
    parser.add_argument('--validate',
                        help='validate the NN',
                        action='store_true')
    parser.add_argument('--beamsearch',
                        help='use beam search instead of best path decoding',
                        action='store_true')
    parser.add_argument(
        '--wordbeamsearch',
        help='use word beam search instead of best path decoding',
        action='store_true')
    parser.add_argument('--dump',
                        help='dump output of NN to CSV file(s)',
                        action='store_true')

    args = parser.parse_args()

    decoderType = DecoderType.BestPath
    if args.beamsearch:
        decoderType = DecoderType.BeamSearch
    elif args.wordbeamsearch:
        decoderType = DecoderType.WordBeamSearch

    # train or validate on IAM dataset
    if args.train or args.validate:
        # load training data, create TF model
        loader = DataLoader(FilePaths.fnTrain, Model.batchSize, Model.imgSize,
                            Model.maxTextLen)

        # save characters of model for inference mode
        open(FilePaths.fnCharList, 'w').write(str().join(loader.charList))

        # save words contained in dataset into file
        open(FilePaths.fnCorpus, 'w').write(
            str(' ').join(loader.trainWords + loader.validationWords))

        # execute training or validation
        if args.train:
            model = Model(loader.charList, decoderType)
            train(model, loader)
        elif args.validate:
            model = Model(loader.charList, decoderType, mustRestore=True)
            validate(model, loader)

    # infer text on test image
    else:
        print(open(FilePaths.fnAccuracy).read())
        model = Model(open(FilePaths.fnCharList).read(),
                      decoderType,
                      mustRestore=True,
                      dump=args.dump)
        infer(model, FilePaths.fnInfer)
Example no. 8
    def __init__(self, config, logger, metadata):
        super(Task, self).__init__(config, logger, metadata)
        self.sum = 0

        self.model = Model(config.get_with_prefix("model"))
        self.data = Data(config.get_with_prefix("data"))
        self.trainer = Trainer(config.get_with_prefix("trainer"), self.model,
                               self.data)
        self.best_val_acc = 0
        self.number_worse_iterations = 0
Example no. 9
def plot_model(contact_matrix,
               susceptible,
               infectious_rate,
               days,
               reference_cases=None,
               measure_factor: float = 1,
               measure_day=0,
               reference_hospital=None,
               offset=0,
               first_patient_age=38,
               name='model',
               scenario=None,
               cap_ic=False,
               title='') -> Model:
    model = Model(contact_matrix,
                  susceptible,
                  infectious_rate,
                  measure_factor=measure_factor,
                  measure_day=measure_day,
                  first_patient_age=first_patient_age,
                  scenario=scenario,
                  cap_ic=cap_ic)
    model.run(days)

    y = [
        model.infected_data, model.recovered_data, model.exposed_data,
        model.hospital_data, model.ic_data, model.dead_data
    ]
    labels = ['infected', 'recovered', 'exposed', 'hospitalized', 'ic', 'dead']
    plot(y, labels, name, 'Amount', title=title)

    if reference_cases is not None:
        y = [reference_cases, model.case_data]
        labels = ['Reference Cases', 'Model Cases']
        plot(y,
             labels,
             'ref_cases',
             'Amount',
             title='Verschil model werkelijkheid')

    if reference_hospital is not None:
        y = [
            np.append(np.zeros(offset), reference_hospital),
            model.hospital_data + model.ic_data
        ]
        labels = ['Reference Hospital + ic', 'Model Hospital + ic']
        plot(y,
             labels,
             'ref_hospital',
             'Amount',
             title='Verschil model werkelijkheid')

    return model
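A hypothetical call; the contact matrix, population counts, and rates are illustrative values, and plot() plus the age-group layout come from the surrounding codebase.

contact_matrix = np.ones((9, 9)) / 9   # assumed uniform mixing between 9 age groups
susceptible = np.full(9, 100_000)      # assumed susceptible population per group
model = plot_model(contact_matrix, susceptible, infectious_rate=0.05,
                   days=120, measure_factor=0.4, measure_day=30,
                   title='lockdown scenario')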
Example no. 10
def analyzePixelRelevance():
	"simplified implementation of paper: Zintgraf et al - Visualizing Deep Neural Network Decisions: Prediction Difference Analysis"
	
	# setup model
	model = Model(open(Constants.fnCharList).read(), DecoderType.BestPath, mustRestore=True)
	
	# read image and specify ground-truth text
	img = cv2.imread(Constants.fnAnalyze, cv2.IMREAD_GRAYSCALE)
	(w, h) = img.shape
	assert Model.imgSize[1] == w
	
	# compute probability of gt text in original image
	batch = Batch([Constants.gtText], [preprocess(img, Model.imgSize)])
	(_, probs) = model.inferBatch(batch, calcProbability=True, probabilityOfGT=True)
	origProb = probs[0]
	
	grayValues = [0, 63, 127, 191, 255]
	if Constants.distribution == 'histogram':
		bins = [0, 31, 95, 159, 223, 255]
		(hist, _) = np.histogram(img, bins=bins)
		pixelProb = hist / sum(hist)
	elif Constants.distribution == 'uniform':
		pixelProb = [1.0 / len(grayValues) for _ in grayValues]
	else:
		raise Exception('unknown value for Constants.distribution')
	
	# iterate over all pixels in image
	pixelRelevance = np.zeros(img.shape, np.float32)
	for x in range(w):
		for y in range(h):
			
			# try a subset of possible grayvalues of pixel (x,y)
			imgsMarginalized = []
			for g in grayValues:
				imgChanged = copy.deepcopy(img)
				imgChanged[x, y] = g
				imgsMarginalized.append(preprocess(imgChanged, Model.imgSize))

			# put them all into one batch
			batch = Batch([Constants.gtText]*len(imgsMarginalized), imgsMarginalized)
			
			# compute probabilities
			(_, probs) = model.inferBatch(batch, calcProbability=True, probabilityOfGT=True)
			
			# marginalize over pixel value (assume uniform distribution)
			margProb = sum([probs[i] * pixelProb[i] for i in range(len(grayValues))])
			
			pixelRelevance[x, y] = weightOfEvidence(origProb, margProb)
			
			print(x, y, pixelRelevance[x, y], origProb, margProb)
			
	np.save(Constants.fnPixelRelevance, pixelRelevance)
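weightOfEvidence is not defined in this excerpt; a plausible sketch following Zintgraf et al. is the difference in log2-odds between the original and the marginalized probability (an assumption about the helper, not its confirmed definition).

def weightOfEvidence(origProb, margProb, eps=1e-10):
    def logOdds(p):
        p = np.clip(p, eps, 1 - eps)  # guard against log(0) and division by zero
        return np.log2(p / (1 - p))
    return logOdds(origProb) - logOdds(margProb)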
Example no. 11
 def mainView(self):
     students = FileBoundStudentsFactory().create()
     students.retrieveState()
     model = Model(students)
     mainView = MainView(self.root, model)
     mainView.pack()
     self.root.mainloop()
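Example no. 12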
def createModel(spec_file):
    with open(spec_file, 'r') as f:
        spec = f.readlines()
    lenspec = len(spec)
    model = Model()
    num_layers = 0
    for desc_ind in range(lenspec):
        desc = spec[desc_ind]
        desc = desc.split()
        if desc[0] == 'rnn':
            in_features, hidden_features, out_features = int(desc[1]), int(
                desc[2]), int(desc[3])
            layer = RNN(in_features, hidden_features, out_features)
            num_layers += 1
        else:
            print(desc[0] + ' layer not implemented!')
            continue  # skip addLayer: `layer` would be stale or undefined here
        model.addLayer(layer)
    return model, (spec, num_layers)
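Example no. 13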
def before_first_request():
    global model, graph

    graph = tf.Graph()
    weight_location = 'weights'
    with graph.as_default():
        model = Model(vocabulary,
                      graph,
                      DecoderType.BestPath,
                      model_dir=weight_location)
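Example no. 14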
def createModel(spec_file):
    with open(spec_file,'r') as f:
        spec = f.readlines()
    model = Model()
    num_linear_layers = 0
    for desc in spec:
        desc = desc.split()
        if desc[0] == 'linear':
            in_features, out_features = int(desc[1]), int(desc[2])
            layer = Linear(in_features, out_features)
            num_linear_layers += 1
        elif desc[0] == 'relu':
            layer = ReLU()
        elif desc[0] == 'dropout':
            layer = Dropout(float(desc[1]), isTrain=True)
        else:
            print(desc[0] + ' layer not implemented!')
            continue  # skip addLayer: `layer` would be stale or undefined here
        model.addLayer(layer)
    return model, (spec, num_linear_layers)
Example no. 15
def create_model(config_file):

    model = Model()

    with open(config_file, 'r') as f:
        num_layers = int(f.readline().strip())
        for i in range(num_layers):
            layer_info = f.readline().strip().split(' ')
            layer_type = layer_info[0]

            if layer_type == LINEAR:
                num_inputs = int(layer_info[1])
                num_outputs = int(layer_info[2])
                model.addLayer(Linear(num_inputs, num_outputs))
            elif layer_type == RELU:
                model.addLayer(ReLU())

        weight_file = f.readline().strip()
        bias_file = f.readline().strip()

        weights = load_file(weight_file)
        biases = load_file(bias_file)

    linear_index = 0
    for layer in model.Layers:
        if isinstance(layer, Linear):
            layer.W = weights[linear_index]
            layer.B = biases[linear_index]
            linear_index += 1

    return model
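A config file matching the reads above (hypothetical contents; LINEAR and RELU are assumed to equal 'linear' and 'relu', and load_file must understand the two weight files).

with open('net.cfg', 'w') as f:
    f.write('2\n')               # number of layer lines that follow
    f.write('linear 784 256\n')  # num_inputs num_outputs
    f.write('relu\n')
    f.write('weights.bin\n')     # weight file, read after the layer lines
    f.write('biases.bin\n')      # bias file
model = create_model('net.cfg')

Example no. 16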
    def test_update(self):
        m = Model(3500, 100)
        do = DefaultOutput()

        m.registerOutput(do)
        m.start()
        self.assertEqual(3500, do.lastFreq)
Example no. 17
 def create_model(self):
     print("Create model called")
     if self.model is None:
         self.model = Model(self, None,
                            self.importExportDataManager.get_data())
     if self.getSelectedAlgorithm() == "Logistic Regression":
         self.model.set_model(
             LogisticRegression(solver='lbfgs', max_iter=1000))
     elif self.getSelectedAlgorithm() == "Decision Tree":
         self.model.model = DecisionTreeClassifier()
         print(self.model.model.__class__)
     elif self.getSelectedAlgorithm() == "Naive Bayes":
         self.model.set_model(GaussianNB())
     else:
         messagebox.showinfo("Warning", "No algorithm selected")
         return
     optimizeParamsMsgBox = messagebox.askquestion(
         "Optimize hyperparameters",
         "Would you like PyMine to optimize the parameters for this model?")
     if optimizeParamsMsgBox == 'yes':
         self.model.optimize_model_hyperparams(self.model.model)
     self.model.performance_summary()
Example no. 18
def main():
    """ Main function """
    # Optional command line args
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--train", help="train the neural network", action="store_true")
    parser.add_argument(
        "--validate", help="validate the neural network", action="store_true")
    parser.add_argument(
        "--wordbeamsearch", help="use word beam search instead of best path decoding", action="store_true")
    args = parser.parse_args()

    decoderType = DecoderType.BestPath
    if args.wordbeamsearch:
        decoderType = DecoderType.WordBeamSearch

    # Train or validate on Cinnamon dataset
    if args.train or args.validate:
        # Load training data, create TF model
        loader = DataLoader(FilePaths.fnTrain, Model.batchSize,
                            Model.imgSize, Model.maxTextLen, load_aug=True)

        # Execute training or validation
        if args.train:
            model = Model(loader.charList, decoderType)
            train(model, loader)
        elif args.validate:
            model = Model(loader.charList, decoderType, mustRestore=False)
            validate(model, loader)

    # Infer text on test image
    else:
        print(open(FilePaths.fnAccuracy).read())
        model = Model(open(FilePaths.fnCharList).read(),
                      decoderType, mustRestore=False)
        infer(model, FilePaths.fnInfer)
Example no. 19
    def __init__(self):
        """ Controller Class constructor """
        # Cmd Line Argument Setup
        self.arg_parser = ArgParserFactory.create_arg_parser()
        self.args = self.arg_parser.parse_args()

        # Config File Setup
        self.config_handler = ConfigHandler()
        self.config_settings = self.config_handler.read_config(
            '../config/config.json')

        # File Factory
        self.file_factory = FileFactory
        self.directory_factory = DirectoryFactory
        # Empty Model
        self.model = Model()
Example no. 20
def infer_by_web(path, option):    
    decoderType = DecoderType.BestPath
    print(open(FilePaths.fnAccuracy).read())
    model = Model(open(FilePaths.fnCharList).read(),
                  decoderType, mustRestore=False)
    
    # show_wait_destroy2("received", path)
    print("Passing image to read for handwritten : ", path);         # modified 08092020
    recognized = infer(model, path)
    
    # morph = path.copy()
    # show_wait_destroy2("morph_received", morph)
    # print("Passing image to read for handwritten : ", morph);         # modified 08092020
    # recognized = infer(model, morph)
    
    return recognized
Example no. 21
def infer_samples(decoder_type=DecoderType.BestPath):
    # print(open(FilePaths.fnAccuracy).read())
    model = Model(open(FilePaths.fnCharList).read(),
                  decoder_type,
                  must_restore=True)
    path = os.path.join('data', 'test_images')
    image_files = [os.path.join(path, f) for f in os.listdir(path)]
    # print(image_files)
    print("######### Inferring Text from Test Images###############")

    for img in image_files:
        # print(img)
        try:
            infer(model, img, show_img=2)
        except Exception as e:
            print("Exception: " + str(e))
Example no. 22
def normalRun(step, freq, input, output, verbose):
    model = Model(freq, step)
    inputMethod, outputMethod = None, None

    if input == 'cli':
        inputMethod = CliInput(model)

    if output == 'cli':
        outputMethod = CliOutput()

    model.registerOutput(outputMethod)
    model.start()
    inputMethod.start()
Example no. 23
class Task(taskplan.Task):
    def __init__(self, config, logger, metadata):
        super(Task, self).__init__(config, logger, metadata)
        self.sum = 0

        self.model = Model(config.get_with_prefix("model"))
        self.data = Data(config.get_with_prefix("data"))
        self.trainer = Trainer(config.get_with_prefix("trainer"), self.model,
                               self.data)
        self.best_val_acc = 0
        self.number_worse_iterations = 0

    def save(self, path):
        self.model.save_weights(str(path / "model.h5py"))
        pickle.dump(self.best_val_acc, open(str(path / "best_model.pkl"),
                                            "wb"))

    def step(self, tensorboard_writer, current_iteration):
        with tensorboard_writer.as_default():
            val_acc = self.trainer.step(current_iteration)
            if val_acc is not None:
                if val_acc > self.best_val_acc:
                    self.best_val_acc = val_acc
                    self.model.save_weights(str(self.task_dir / "model.h5py"))
                    self.number_worse_iterations = 0
                else:
                    self.number_worse_iterations += 1

                if self.number_worse_iterations > 5:
                    self.pause_computation = True

                tf.summary.scalar('val/best_acc',
                                  self.best_val_acc,
                                  step=current_iteration)

    def load(self, path):
        self.model.load_weights(str(path / "model.h5py"))
        self.best_val_acc = pickle.load(
            open(str(path / "best_model.pkl"), "rb"))
Example no. 24
def generalize(M,p,S=[]):
    
    # Fetch everything from model
    f    = M.f
    A    = M.A
    B    = M.B
    F    = M.F
    h    = M.h
    C    = M.C
    D    = M.D
    H    = M.H
    th   = M.th
    nx   = M.nx
    nu   = M.nu
    ny   = M.ny
    nth  = M.nth
    name = M.name
    typ  = M.typ
    dt   = M.dt
    Q    = M.Q
    R    = M.R
    
    # Generalize all system functions and matrices
    # State transition function 
    def f_t(x,u,th):
        x_t = zeros(p*nx)
        for i in range(0,p):
            x_t[i*nx:(i+1)*nx] = f(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return x_t
    
    # State transition function to state gradient
    def A_t(x,u,th):
        A_t = zeros((p*nx,p*nx))
        for i in range(0,p):
            A_t[i*nx:(i+1)*nx,i*nx:(i+1)*nx] = \
                            A(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return A_t
    
    # State transition function to input gradient
    def B_t(x,u,th):
        B_t = zeros((p*nx,p*nu))
        for i in range(0,p):
            B_t[i*nx:(i+1)*nx,i*nu:(i+1)*nu] = \
                            B(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return B_t
    
    # State transition function to parameter gradient
    def F_t(x,u,th):
        F_t = zeros((p*nx,nth))
        for i in range(0,p):
            F_t[i*nx:(i+1)*nx,:] = \
                            F(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return F_t
    
    # Output function
    def h_t(x,u,th):
        y_t = zeros(p*ny)
        for i in range(0,p):
            # index output blocks with ny (the original indexed with nx by mistake)
            y_t[i*ny:(i+1)*ny] = h(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return y_t
    
    # Output function to state gradient
    def C_t(x,u,th):
        C_t = zeros((p*ny,p*nx))
        for i in range(0,p):
            C_t[i*ny:(i+1)*ny,i*nx:(i+1)*nx] = \
                            C(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return C_t
    
    # Output function to input gradient
    def D_t(x,u,th):
        D_t = zeros((p*ny,p*nu))
        for i in range(0,p):
            D_t[i*ny:(i+1)*ny,i*nu:(i+1)*nu] = \
                            D(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return D_t
    
    # Measurement function to parameter gradient
    def H_t(x,u,th):
        H_t = zeros((p*ny,nth))
        for i in range(0,p):
            H_t[i*ny:(i+1)*ny,:] = \
                H(x[i*nx:(i+1)*nx],u[i*nu:(i+1)*nu],th)
        return H_t
    
    # Include correlation information if specified
    if not isempty(Q) and not isempty(R):
        if not isempty(S):
            Pw = kron(S,inv(Q))
            Pz = kron(S,inv(R))
        else:
            Pw = kron(eye(p),inv(Q))
            Pz = kron(eye(p),inv(R))
    else:
        Pw = []
        Pz = []
        
    M_t = Model('Generalized '+ name, typ, dt, [p*nx,p*nu,p*ny,nth], \
                f_t,A_t,B_t,F_t,h_t,C_t,D_t,H_t,th,Pw,Pz)
    
    return M_t, kron(eye(p,p,1),eye(int(M_t.nx/p)))
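In matrix form, the lifting above stacks p copies of the system, so the gradients are block diagonal and the precision weights get Kronecker structure:

A_t(x,u,\theta) = \operatorname{blkdiag}\big(A(x_1,u_1,\theta),\dots,A(x_p,u_p,\theta)\big),
\qquad P_w = S \otimes Q^{-1},\quad P_z = S \otimes R^{-1},

with S = I_p when no correlation matrix S is supplied.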
Example no. 25
def main():
    print("⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆")
    print("★ Welcome to Language Detection by No Look Pass ★")
    print("⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆⋆")

    training_set = "OriginalDataSet/training-tweets.txt"

    # Training Set is fixed for the purposes of the demo
    # while True:
    #     print("Enter Training Set file path: ", end="")
    #     training_set = input()
    #     training_set = "OriginalDataSet/training-tweets.txt"
    #     # Source: https://www.guru99.com/python-check-if-file-exists.html
    #     if path.exists(training_set):
    #         print("\tOK: Training File Accepted\n")
    #         break
    #     else:
    #         print("\tERROR: File path does not exist\n")

    while True:
        print("Enter Test Set file path: ", end="")
        test_set = input()
        # test_set = "OriginalDataSet/test-tweets-given.txt"
        # Source: https://www.guru99.com/python-check-if-file-exists.html
        if path.exists(test_set):
            print("\tOK: Test File Accepted\n")
            break
        else:
            print("\tERROR: File path does not exist\n")

    # Create output folder
    create_directory("output/")

    # Initialize models
    byom = Model(v=3, n=3, delta=0.1)
    model_v0_n1_d0 = Model(v=0, n=1, delta=0)
    model_v1_n2_d05 = Model(v=1, n=2, delta=0.5)
    model_v1_n3_d1 = Model(v=1, n=3, delta=1)
    model_v2_n2_d03 = Model(v=2, n=2, delta=0.3)

    # Train models
    model_v0_n1_d0.train(training_set)
    model_v1_n2_d05.train(training_set)
    model_v1_n3_d1.train(training_set)
    model_v2_n2_d03.train(training_set)
    byom.train(training_set)

    # Test models using test set
    model_v0_n1_d0.test(test_set)
    model_v1_n2_d05.test(test_set)
    model_v1_n3_d1.test(test_set)
    model_v2_n2_d03.test(test_set)
    byom.test(test_set)

    # Evaluate models
    model_v0_n1_d0.evaluate()
    model_v1_n2_d05.evaluate()
    model_v1_n3_d1.evaluate()
    model_v2_n2_d03.evaluate()
    byom.evaluate()
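Example no. 26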
def infer_model(char_list, infer_location):
    model = Model(char_list, DecoderType.BestPath)
    infer(model, infer_location)


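Example no. 27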
def train_model(model, train_dir):
    loader = DataLoader(Model.batchSize, Model.imgSize, Model.maxTextLen,
                        train_dir)
    logging.info('Model chars: {chars}.'.format(chars=loader.charList))
    train(model, loader)


def infer_model(char_list, infer_location):
    model = Model(char_list, DecoderType.BestPath)
    infer(model, infer_location)


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s;%(levelname)s;%(message)s')

    vocabulary = '.0123456789'
    in_dir = 'C:/Users/FlorijnWim/PycharmProjects/htr-ctctcnn/traindata'
    output_model_dir = 'C:/Users/FlorijnWim/PycharmProjects/htr-ctctcnn/model_new'

    graph = tf.Graph()
    with graph.as_default():
        output_model = Model(vocabulary,
                             graph,
                             DecoderType.BestPath,
                             model_dir=output_model_dir)
        train_model(output_model, in_dir)
Example no. 28
class Mainframe(Frame):
    """
    Multiple Document Interface
    Everything in the application stems from the mainframe
    """
    def __init__(self, root):
        self.root = root

        self.importExportDataManager = ImportExportDataManager(self)
        self.dataPreprocessingManager = DataPreprocessingManager(self)
        self.discretizationManager = DiscretizationManager(self)
        self.optionsWindow = OptionsWindow(self)
        self.frame = Frame(root)
        self.model = None

        # create menu bar
        self.menubar = Menu(root)
        root.config(menu=self.menubar)
        self.fileMenu = Menu(self.menubar, tearoff=False)
        self.menubar.add_cascade(label="File", menu=self.fileMenu)
        self.fileMenu.add_command(
            label="Import data...",
            command=self.importExportDataManager.set_filename)
        self.fileMenu.add_command(label="Options...",
                                  command=self.optionsWindow.create)
        self.dataMenu = Menu(self.menubar, tearoff=False)
        self.menubar.add_cascade(label="Data", menu=self.dataMenu)
        self.dataMenu.add_command(label="Replace missing values...",
                                  command=self.dataPreprocessingManager.create)
        self.dataMenu.add_command(label="Discretize data...",
                                  command=self.discretizationManager.create)

        # create labels
        self.lblReduceFeatures = Label(self.frame, text="Reduce Features?")
        self.lblInterpolateMissingValues = Label(
            self.frame, text="Interpolate missing values?")
        self.lblAttributes = Label(self.frame, text="Select class label: ")
        self.lblAlgorithm = Label(self.frame, text="Select algorithm: ")

        # create comboboxes
        self.cmbReduceFeatures = Combobox(self.frame,
                                          state="readonly",
                                          values=["Yes", "No"],
                                          width=5)
        self.cmbInterpolateMissingValues = Combobox(self.frame,
                                                    state="readonly",
                                                    values=["Yes", "No"],
                                                    width=5)
        self.cmbAttributes = Combobox(self.frame, state="readonly")
        self.cmbAlgorithm = Combobox(
            self.frame,
            state="readonly",
            values=["Logistic Regression", "Decision Tree", "Naive Bayes"])

        # set default combobox values
        self.cmbReduceFeatures.set("No")
        self.cmbInterpolateMissingValues.set("No")
        self.cmbAlgorithm.set("None")

        # create preview data table
        self.previewDataTable = PreviewDataTable(self)

        # create buttons
        self.runButton = Button(text="Run", width=10)
        self.runButton.bind("<Button-1>", self.run_algorithm)

        # set grid layout
        rowNumber = 0
        self.frame.grid(row=rowNumber, column=1, sticky=W)
        self.lblReduceFeatures.grid(row=rowNumber, column=1, sticky=W)
        self.cmbReduceFeatures.grid(row=rowNumber, column=2, sticky=W)
        rowNumber += 1
        self.lblInterpolateMissingValues.grid(row=rowNumber,
                                              column=1,
                                              sticky=W)
        self.cmbInterpolateMissingValues.grid(row=rowNumber,
                                              column=2,
                                              sticky=W)
        rowNumber += 1
        self.lblAttributes.grid(row=rowNumber, column=1, sticky=W)
        self.cmbAttributes.grid(row=rowNumber, column=2, sticky=W)
        rowNumber += 1
        self.lblAlgorithm.grid(row=rowNumber, column=1, sticky=W)
        self.cmbAlgorithm.grid(row=rowNumber, column=2, sticky=W)
        rowNumber += 1
        Label(root, text="").grid(row=rowNumber, column=1)
        rowNumber += 1

        self.previewDataTable.treeview.grid(
            row=rowNumber,
            column=1,
            rowspan=self.previewDataTable.treeview.winfo_width(),
            columnspan=2,
            sticky=W)
        rowNumber += 1
        Label(root, text="").grid(row=rowNumber, column=1)
        rowNumber += 1

        self.runButton.grid(row=rowNumber, column=1)
        rowNumber += 1

    def getReduceFeatureOption(self):
        return self.cmbReduceFeatures.get()

    def getInterpolateMissingValuesOption(self):
        return self.cmbInterpolateMissingValues.get()

    def getSelectedClassLabel(self):
        return self.cmbAttributes.get()

    def getSelectedAlgorithm(self):
        return self.cmbAlgorithm.get()

    def getSelectedUserParams(self, event):
        print("Reduce features: " + self.getReduceFeatureOption())
        print("Interpolate missing values: " +
              self.getInterpolateMissingValuesOption())
        print("Class label: " + self.getSelectedClassLabel())
        print("Selected algorithm: " + self.getSelectedAlgorithm())
        print(self.importExportDataManager.summary())

    def run_algorithm(self, event):
        if self.importExportDataManager.get_filename() is not None and \
                isinstance(self.importExportDataManager.get_data(), pd.DataFrame):
            if self.getReduceFeatureOption() == 'Yes':
                print("Reducing features...")
            if self.getInterpolateMissingValuesOption() == 'Yes':
                print("Interpolating values...")
            self.create_model()

    def create_model(self):
        print("Create model called")
        if self.model is None:
            self.model = Model(self, None,
                               self.importExportDataManager.get_data())
        if self.getSelectedAlgorithm() == "Logistic Regression":
            self.model.set_model(
                LogisticRegression(solver='lbfgs', max_iter=1000))
        elif self.getSelectedAlgorithm() == "Decision Tree":
            self.model.model = DecisionTreeClassifier()
            print(self.model.model.__class__)
        elif self.getSelectedAlgorithm() == "Naive Bayes":
            self.model.set_model(GaussianNB())
        else:
            messagebox.showinfo("Warning", "No algorithm selected")
            return
        optimizeParamsMsgBox = messagebox.askquestion(
            "Optimize hyperparameters",
            "Would you like PyMine to optimize the parameters for this model?")
        if optimizeParamsMsgBox == 'yes':
            self.model.optimize_model_hyperparams(self.model.model)
        self.model.performance_summary()
Example no. 29
def run(filename):
    "main function"
    # optional command line args

    parser = argparse.ArgumentParser()
    parser.add_argument('--train', help='train the NN', action='store_true')
    parser.add_argument('--validate',
                        help='validate the NN',
                        action='store_true')
    parser.add_argument('--beamsearch',
                        help='use beam search instead of best path decoding',
                        action='store_true')
    parser.add_argument(
        '--wordbeamsearch',
        help='use word beam search instead of best path decoding',
        action='store_true')
    parser.add_argument('--dump',
                        help='dump output of NN to CSV file(s)',
                        action='store_true')

    args = parser.parse_args()

    decoderType = DecoderType.BestPath
    if args.beamsearch:
        decoderType = DecoderType.BeamSearch
    elif args.wordbeamsearch:
        decoderType = DecoderType.WordBeamSearch

    # train or validate on IAM dataset
    if args.train or args.validate:
        # load training data, create TF model
        loader = DataLoader(FilePaths.fnTrain, Model.batchSize, Model.imgSize,
                            Model.maxTextLen)

        # save characters of model for inference mode
        open(FilePaths.fnCharList, 'w').write(str().join(loader.charList))

        # save words contained in dataset into file
        open(FilePaths.fnCorpus, 'w').write(
            str(' ').join(loader.trainWords + loader.validationWords))

        # execute training or validation
        if args.train:
            model = Model(loader.charList, decoderType)
            train(model, loader)
        elif args.validate:
            model = Model(loader.charList, decoderType, mustRestore=True)
            validate(model, loader)

    # infer text on test image
    else:
        index_list = []
        result_list = []
        prob_list = []
        print(open(FilePaths.fnAccuracy).read())
        model = Model(open(FilePaths.fnCharList).read(),
                      decoderType,
                      mustRestore=True,
                      dump=args.dump)

        for dirpath, dirnames, files in os.walk('../output_words/' + filename,
                                                topdown=False):
            for sub_file in sorted(files, key=getint):
                img_path = dirpath + '/' + sub_file
                # print('---------------------------------------------------')
                index_number, _ = str(sub_file).split('.')
                # print("File path: "+img_path)
                try:
                    result, prob = infer(model, img_path)
                except ValueError:
                    print("Value error")
                    continue
                # print(index_number, result, prob)
                index_list.append(index_number)
                result_list.append(result)
                prob_list.append(prob)

        return index_list, result_list, prob_list
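A hypothetical driver for run(); the directory name under ../output_words/ is illustrative.

index_list, result_list, prob_list = run('page_0001')
for idx, text, prob in zip(index_list, result_list, prob_list):
    print(idx, text, prob)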
Example no. 30
    with open('settings.yml', 'r') as f:
        SETTINGS = yaml.load(f, Loader=yaml.FullLoader)

    # Start up interface
    gui = Interface(SETTINGS)
    gui.startup_window()

    # After pressing start in startup_window load in whole program
    from src.Loader import Loader
    from src.Model import Model

    # Initialize reader loading new images
    reader = Loader(SETTINGS)

    # Define model and load in weights
    predictor = Model(SETTINGS)
    nn = predictor.load()
    test_data_loaded = False  #indication of the loaded dataset

    gui.startup_window.close()
    window = gui.window

    # Event loop. Read buttons, make callbacks
    while True:
        # Read the Window
        event, value = window.read()
        if event in ('Quit', None):
            break

        # Lookup event in function dictionary
        elif event in gui.dispatch_dictionary: