def process_image(self, image):
    file_converter = FileConverter()
    # convert the image to a byte string
    image_bytes = file_converter.png_to_jpeg(image)

    scanner = Scanner()
    # scan the image and give it a birds-eye view; returns a numpy array of the pixels that make up the image
    scan_np = scanner.scan(image_bytes)

    # extract the individual answers from the scanned test
    extractor = Extract()
    answers = extractor.get_all_answers(scan_np, 5)

    color = Color()
    bw_answers = color.all_ans_to_bw(answers)

    size = Size()
    DIM = (28, 28)
    shrunk_images = size.shrink_images(bw_answers, DIM)

    # convert the answer images to a single array, which we used in training our model;
    # returns each image as (1, 28, 28, 1) and as type float
    answers_flat = file_converter.convert_images(shrunk_images)

    # now that we have a list of images of the answers as bw 1D numpy arrays,
    # we can run them through our model and grade them
    # first we need to load our model
    model_loader = ModelLoader()
    MODEL_JSON = 'models/modified_model_98.json'
    MODEL_WEIGHTS = 'models/modified_model_98.h5'
    model = model_loader.load_model_2(MODEL_JSON, MODEL_WEIGHTS)

    # compile model
    model.compile(optimizer=RMSprop(lr=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    grader = Grader()
    answers = grader.get_answers(answers_flat, model)

    # get the images as a 784 (28x28) length string so we can store the data in a database
    ans_strings = file_converter.get_string_images(answers_flat)
    compressed_images = file_converter.compress_images(ans_strings)

    # add the images to the database so we can create a large dataset of handwritten letters
    # storage = Storage()
    # storage.insert(answers, compressed_images)

    return answers
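# The pipeline above rebuilds a model from an architecture JSON plus HDF5 weights via
# load_model_2. As a hedged sketch only (not the project's actual implementation), such a
# helper typically wraps the standard Keras calls below; the file names and optimizer
# settings simply mirror the ones used in process_image above.
from keras.models import model_from_json
from keras.optimizers import RMSprop


def load_json_h5_model(json_path, weights_path):
    # Rebuild the architecture from JSON, then attach the trained weights.
    with open(json_path) as f:
        model = model_from_json(f.read())
    model.load_weights(weights_path)
    return model


model = load_json_h5_model('models/modified_model_98.json', 'models/modified_model_98.h5')
model.compile(optimizer=RMSprop(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])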
def get(self):
    modelLoader = ModelLoader.getInstance()
    device = modelLoader.getDevice()
    model = modelLoader.getModel()
    modelc = modelLoader.getModelc()

    detectedObject_list = detect.object_detection(
        imgsz=[576],
        name="query",
        source="yolov5/hanssem/images/query",
        device=device,
        model=model,
        modelc=modelc,
        iou_thres=0.1)

    return jsonify({"detected_objectList": detectedObject_list})
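# The handler above relies on a process-wide ModelLoader singleton reached through
# getInstance(). The class below is only a hedged sketch of that accessor pattern:
# the attribute names come from the calls above, but the actual loading logic is not
# shown in the source, so the placeholders here are assumptions.
class ModelLoader:
    _instance = None

    def __init__(self):
        # Placeholders: the real loader would pick a device and load the YOLOv5
        # detection model and optional second-stage classifier here.
        self.device = None
        self.model = None
        self.modelc = None

    @classmethod
    def getInstance(cls):
        # Lazily create the shared instance so the weights are loaded only once per process.
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def getDevice(self):
        return self.device

    def getModel(self):
        return self.model

    def getModelc(self):
        return self.modelc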
from ModelBuilder import ModelBuilder
from ModelEvaluator import ModelEvaluator
from DataTransformer import multi_csv_to_dataset
from ModelLoader import ModelLoader

dataset = multi_csv_to_dataset([
    'test_data/SHOP_daily.csv',
    # 'test_data/TD_daily.csv',
    # 'test_data/ENB_daily.csv',
    # 'test_data/BA_daily.csv',
    # 'test_data/TSLA_daily.csv'
])

model_loader = ModelLoader()

# test_data = ModelBuilder().build_model(dataset, 150)
# model_loader.save_model(test_data.model, 'multistock-2020-04-09')

test_data = ModelBuilder().split_test_data(dataset, 0.7)
test_data.model = model_loader.load_model('multistock-2020-04-09.h5')

evaluator = ModelEvaluator()
evaluator.analyze(test_data)
evaluator.plot(test_data)
"A_{0}.json".format(epoch), "w") as json_file: json_file.write(model_json) # serialize weights to HDF5 model.save_weights( config.get("model", "dir") + "/" + config.get("model", "name") + "W_{0}.h5".format(epoch)) print("Saved model to disk") return if __name__ == "__main__": # Specify number of particles to use and number of features nParticles = 60 #nFeatures=51 nFeatures = 47 loader = ModelLoader((nParticles, nFeatures)) model = loader.load() utils = Utilities(nParticles) history = Histories() history.set_up_config(config=config) history.on_train_begin() # Build the first training dataset print("TRAIN_DATA: ", TRAIN_DATA) X_train, Y, W_train, MVA_train = utils.BuildBatch(indir=TRAIN_DATA) for epoch in range(100): pool_local = ThreadPool(processes=1) # Shuffle loaded datasets and begin inds = range(len(X_train)) np.random.shuffle(inds) X_epoch, Y_epoch, W_epoch, MVA_epoch = X_train[inds], Y[inds], W_train[
import numpy as np
from PIL import Image  # the package is installed as Pillow but imported as PIL
from timeit import default_timer as timer
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from ModelLoader import ModelLoader
from PostProcessor import PostProcessor
from preprocess import PreProcessor
import json

model_loader = ModelLoader()
img = Image.open('/home/bilal/Downloads/foto_van_yosra1.jpg')

# print("---------------------------------")
# print("0 for tinyYolo")
# val = input("give your neural network architecture type: ")

preprocessor = PreProcessor(0, img)
img_data = preprocessor.preprocess()
# print("---------------------------------------------")

# load a simple model
session = model_loader.load_session(1)
begin = timer()

# see the input name and shape
input_name = session.get_inputs()[0].name
"""# print("input name = ", input_name)
input_shape = session.get_inputs()[0].shape
    help='Number of extraction summaries')
parser.add_argument(
    "--lang",
    default='en',
    type=str,
    help='If the language of the article isn\'t English, it will automatically be translated via Google')
parser.add_argument(
    "--super_long",
    action='store_true',
    help='If the length of the article is >512, this option is needed')
args = parser.parse_args()

# if args.super_long:
#     sys.stdout.write('\n<Warning: Number of extractions might be slightly altered with the --super_long option>\n')

# Language initiator
print('Language initializing...')
lf = LangFactory(args.lang)
translator = None if args.lang in lf.support_lang else TranslatorY()

print('Data loading...')
data = DataLoader(args.txt_file, args.super_long, args.lang, translator).data

print('Model loading...')
model = ModelLoader(lf.toolkit.cp, lf.toolkit.opt, args.lang)

print('Summarizing...')
summarizer = Summarizer(data, model, args.n, translator)
# import numpy as np
from PIL import Image
import time
import sys
# import matplotlib.pyplot as plt
# import matplotlib.patches as patches
from ModelLoader import ModelLoader
from PostProcessor import PostProcessor
from preprocess import PreProcessor
# import json

[script, image, onnxmodel] = sys.argv

model_loader = ModelLoader(onnxmodel)
img = Image.open(image)

# print("---------------------------------")
# print("0 for tinyYolo")
# val = input("give your neural network architecture type: ")

preprocessor = PreProcessor(0, img)
img_data = preprocessor.preprocess()
# print("---------------------------------------------")

# load a simple model
session = model_loader.load_session(1)
begin = time.time()

# see the input name and shape
input_name = session.get_inputs()[0].name
print(session.get_inputs()[0])
"""# print("input name = ", input_name)
input_shape = session.get_inputs()[0].shape
from ModelLoader import ModelLoader
from ModelMetadataCache import ModelMetadataCache

globalModelLoader = ModelLoader(ModelMetadataCache())

currentLevel = None

GRIDSIZE = 2
CONGASPEED = 0.1
CONGASTEP = 0.1
TURNSPEED = 12
TILESIZE = 2
SCALE = 1
COLLIDE_DEBUG = 0
CAMERA_PAUSE = 30
INIT = 1
LEAVING = []
COLORS = []
WALKERS = []
CAMERA_MOVE = 4
CAMERA_MOVE_ANGLE = 4


def turnAngle(angle, angleTo, amt):
    if abs(angle - 360 - angleTo) < abs(angle - angleTo):
        angle -= 360
    elif abs(angle + 360 - angleTo) < abs(angle - angleTo):
        angle += 360
    return moveInc(angle, angleTo, amt) % 360


def moveInc(move, moveTo, inc):
    if moveTo > move:
# -*- coding: utf-8 -*-
import json

from config import Config
from ModelLoader import ModelLoader
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, render_template, request

print("Initiating server. Please wait until models' loading is finished.")
model_loader = ModelLoader()

app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)

from db.dbmodel import Feedback


@app.route("/")
def main():
    return render_template("index.html",
                           model_type="authors",
                           models=model_loader.get_models())


@app.route("/auto")
def autocomplete():
    term = request.args.get("term")
    auto = model_loader.autocomplete(term)
    auto = json.dumps(list(auto))
    auto = bytearray(auto, "utf-8")
    return auto
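# A hedged usage sketch for the /auto route above. The host, port, and search term are
# placeholders (the source does not show how the app is served), and the requests
# library is assumed to be available on the client side.
import requests

resp = requests.get("http://localhost:5000/auto", params={"term": "dost"})
print(resp.json())  # the route returns a JSON-encoded list of completions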
def __init__(self):
    self.version = "1.2.1"
    logging.info("Starting Toggle " + self.version)

    file_path = os.path.join("/etc/toggle", "local.cfg")
    if not os.path.exists(file_path):
        logging.info(file_path + " does not exist, Creating one")
        os.mknod(file_path)
        os.chmod(file_path, 0o777)

    # Parse the config files.
    config = CascadingConfigParser([
        '/etc/toggle/default.cfg', '/etc/toggle/printer.cfg',
        '/etc/toggle/local.cfg'
    ])

    # Get loglevel from the Config file
    level = config.getint('System', 'loglevel')
    if level > 0:
        logging.getLogger().setLevel(level)
    sys.stdout = LoggerWriter(config, logging, 20)
    sys.stderr = LoggerWriter(config, logging, 50)

    Clutter.init(None)
    style = Mx.Style.get_default()
    style.load_from_file(config.get("System", "stylesheet"))

    config.ui = Clutter.Script()
    try:
        config.ui.load_from_file(config.get("System", "ui"))
    except:
        print "Error loading UI"
        import traceback
        traceback.print_exc()

    config.stage = config.ui.get_object("stage")
    config.stage.connect("destroy", self.stop)
    config.stage.connect('key-press-event', self.key_press)

    # Set up tabs
    config.tabs = CubeTabs(config.ui, 4)
    config.splash = Splash(config)
    config.splash.set_status("Starting Toggle " + self.version + "...")
    config.jog = Jog(config)
    config.temp_graph = TemperatureGraph(config)
    config.filament_graph = FilamentGraph(config)

    # Set up SockJS and REST clients
    host = config.get("Rest", "hostname")
    config.rest_client = RestClient(config)

    # Add other stuff
    config.volume_stage = VolumeStage(config)
    config.message = Message(config)
    config.printer = Printer(config)
    config.loader = ModelLoader(config)
    config.plate = Plate(config)
    config.socks_client = WebSocksClient(config, host="ws://" + host + ":5000")

    # mouse
    use_mouse = int(config.get('System', 'mouse'))
    self.cursor = config.ui.get_object("cursor")
    if use_mouse:
        config.stage.connect("motion-event", self.mouse_move)
        logging.info("Mouse is active")
    else:
        logging.info("Mouse is not active")
        self.cursor.set_opacity(0)

    config.push_updates = JoinableQueue(10)
    self.config = config

    config.plate.make_scale()
    config.stage.show()
def __init__(self):
    from .__init__ import __version__
    logging.info("Initializing Toggle {}".format(__version__))

    file_path = os.path.join("/etc/toggle", "local.cfg")
    if not os.path.exists(file_path):
        logging.info(file_path + " does not exist, Creating one")
        os.mknod(file_path)
        os.chmod(file_path, 0o666)

    # Parse the config files.
    config = CascadingConfigParser([
        '/etc/toggle/default.cfg', '/etc/toggle/printer.cfg',
        '/etc/toggle/local.cfg'
    ])

    # Get loglevel from the Config file
    level = config.getint('System', 'loglevel')
    if level > 0:
        logging.getLogger().setLevel(level)
    sys.stdout = LoggerWriter(config, logging, logging.INFO)
    sys.stderr = LoggerWriter(config, logging, logging.FATAL)

    Clutter.init(None)
    style = Mx.Style.get_default()
    style.load_from_file(config.get("System", "stylesheet"))

    config.ui = Clutter.Script()
    try:
        config.ui.load_from_file(config.get("System", "ui"))
    except:
        print("Error loading UI")
        import traceback
        traceback.print_exc()

    config.stage = config.ui.get_object("stage")
    config.stage.connect("destroy", self.stop)
    config.stage.connect('key-press-event', self.key_press)
    config.screen_width = config.getint("Screen", "width")
    config.screen_height = config.getint("Screen", "height")
    config.screen_rot = config.get("Screen", "rotation")
    config.screen_full = config.getboolean("Screen", "fullscreen")

    # Set up tabs
    config.tabs = CubeTabs(config.ui, 4)
    config.splash = Splash(config)
    config.splash.set_status("Starting Toggle {} ...".format(__version__))
    config.jog = Jog(config)
    config.temp_graph = TemperatureGraph(config)
    config.filament_graph = FilamentGraph(config)

    m = Network.get_manager()
    if m == "connman":
        logging.debug("Using Connman")
        config.network = ConnMan()
    elif m == "nm":
        logging.debug("Using NetworkManager")
        config.network = NetworkManager()
    else:
        logging.warning("Neither NetworkManager nor Connman was found")

    config.Settings = Settings(config)

    # Set up SockJS and REST clients
    config.rest_client = RestClient(config)

    # Add other stuff
    config.volume_stage = VolumeStage(config)
    config.message = Message(config)
    config.printer = Printer(config)
    config.loader = ModelLoader(config)
    config.plate = Plate(config)
    config.socks_client = WebSocksClient(config)

    # mouse
    use_mouse = int(config.get('Input', 'mouse'))
    self.cursor = config.ui.get_object("cursor")
    if use_mouse:
        config.stage.connect("motion-event", self.mouse_move)
        logging.info("Mouse is active")
    else:
        config.stage.connect("touch-event", self.mouse_move)
        logging.info("Mouse is not active, using touch instead")
        self.cursor.set_opacity(0)

    config.mouse_invert_x = config.getboolean('Input', 'mouse_invert_x')
    config.mouse_invert_y = config.getboolean('Input', 'mouse_invert_y')
    config.mouse_swap = config.getboolean('Input', 'mouse_swap_xy')

    config.push_updates = JoinableQueue(10)
    self.config = config

    config.plate.make_scale()
    config.stage.show()
configuration_name = args.config

###### Parse config: #####
config = ConfigParser.RawConfigParser()
config.read(configuration_name)

TRAIN_DATA = config.get("data", "train")
TEST_DATA = config.get("data", "test")
TRAINING_RES = config.get("model", "dir")
MODEL_NAME = config.get("model", "name")

# Specify number of particles to use and number of features
nParticles = 60
# nFeatures = 51
nFeatures = 47
loader = ModelLoader((nParticles, nFeatures))

## Define Loss for the model:
from Loss.Loss import multi_weighted_logloss

utils = Utilities(nParticles)
# history = Histories()
# history.set_up_config(config=config)
# history.on_train_begin()

# Build the first training dataset
print("TRAIN_DATA: ", TRAIN_DATA)
X_train, Y, W_train, MVA_train = utils.BuildBatch(indir=TRAIN_DATA,
                                                  nEvents=50,
                                                  nFiles=10)

model = loader.load_multiclass(
    ouput_class=4,