def classifier(self):
    """
    Lazily build and cache the classifier instance.

    The instance is cached in the module-level ``globs._classifier`` so
    that when the ClipClassifier is copied into a new process, that
    process constructs its own Classifier on first access.
    """
    if globs._classifier is not None:
        return globs._classifier

    load_start = datetime.now()
    logging.info("classifier loading")
    # NOTE(review): session is passed positionally — assumes Model's
    # first parameter is the session; confirm against ml_tools.model.
    session = tools.get_session(disable_gpu=not self.config.use_gpu)
    globs._classifier = Model(session)
    globs._classifier.load(self.config.classify.model)
    logging.info("classifier loaded ({})".format(datetime.now() - load_start))
    return globs._classifier
def get_full_classifier(config):
    """
    Build and return a freshly loaded ``Model`` classifier.

    Unlike the cached ``classifier`` property, this always constructs a
    new instance from ``config.classify.model`` — no caching is done.

    Args:
        config: configuration object providing ``train``, ``use_gpu``
            and ``classify.model``.

    Returns:
        A ``Model`` with its weights loaded from ``config.classify.model``.
    """
    # Imported on demand so the (heavy) model module is only loaded when
    # a full classifier is actually required.
    from ml_tools.model import Model

    t0 = datetime.now()
    logging.info("classifier loading")
    classifier = Model(
        train_config=config.train,
        session=tools.get_session(disable_gpu=not config.use_gpu),
    )
    classifier.load(config.classify.model)
    logging.info("classifier loaded ({})".format(datetime.now() - t0))
    return classifier
def classifier(self):
    """
    Return the process-wide classifier, creating it on first use.

    Caching in ``globs._classifier`` means a ClipClassifier copied to a
    new process builds its own Classifier instance. Depending on
    ``self.kerasmodel``, either a KerasModel (weights loaded from
    ``self.model_file``) or a TF ``Model`` is constructed.
    """
    if globs._classifier is None:
        load_start = datetime.now()
        logging.info("classifier loading")
        if self.kerasmodel:
            keras_model = KerasModel(self.config.train)
            keras_model.load_weights(self.model_file)
            globs._classifier = keras_model
        else:
            tf_model = Model(
                train_config=self.config.train,
                session=tools.get_session(disable_gpu=not self.config.use_gpu),
            )
            tf_model.load(self.model_file)
            globs._classifier = tf_model
        logging.info("classifier loaded ({})".format(datetime.now() - load_start))
    return globs._classifier
def __init__(self, train_config=None, session=None, training=False, tflite=False):
    """
    Initialise model state.

    Args:
        train_config: optional training configuration supplying
            ``use_gru`` and ``train_dir``. May be None, in which case
            defaults are used.
        session: optional TF session; one is created when omitted.
        training: whether the model is built for training (affects the
            input frame count, see bottom of this method).
        tflite: whether the model targets tflite conversion.
    """
    self.tflite = tflite
    self.training = training
    # FIX: train_config defaults to None but was dereferenced
    # unconditionally here, even though it is guarded with
    # `if train_config:` for train_dir below. Default use_gru to False
    # when no config is supplied so the documented default is usable.
    self.use_gru = train_config.use_gru if train_config else False
    self.name = self.model_name()  # subclass-provided hook
    self.session = session or tools.get_session()
    self.saver = None
    tf.compat.v1.disable_eager_execution()

    # Dataset container type (train / validation / test splits).
    self.datasets = namedtuple("Datasets", "train, validation, test")

    # ------------------------------------------------------
    # placeholders, used to feed data to the model
    # ------------------------------------------------------
    self.X = None
    self.y = None
    self.keep_prob = None
    self.is_training = None
    self.global_step = None

    # ------------------------------------------------------
    # tensorflow nodes used to evaluate
    # ------------------------------------------------------
    # prediction for each class (probability distribution)
    self.prediction = None
    # accuracy of batch
    self.accuracy = None
    # total loss of batch
    self.loss = None
    # training operation
    self.train_op = None
    self.novelty = None
    self.novelty_distance = None
    self.state_in = None
    self.state_out = None
    self.logits_out = None
    self.hidden_out = None
    self.lstm_out = None

    # we store samples and use these to plot projections during training
    self.train_samples = None
    self.val_samples = None

    # number of samples to use when evaluating the model; 1000 works well
    # but is a bit slow, 100 should give results to within a few percent.
    self.eval_samples = 500
    # number of samples to use when generating the model report;
    # at least 1000 is recommended for a good representation.
    self.report_samples = 2000

    # how often to do an evaluation + print
    self.print_every = 6000

    # restore best weights found during training rather than the most recent
    self.use_best_weights = True

    # the score this model got on its final evaluation
    self.eval_score = None

    # our current global step
    self.step = 0

    # enable parallel loading and training on data (much faster)
    self.enable_async_loading = True

    # folders to write tensorboard logs / checkpoints to
    if train_config:
        self.log_dir = os.path.join(train_config.train_dir, "logs")
        self.checkpoint_folder = os.path.join(train_config.train_dir, "checkpoints")
    else:
        self.log_dir = "./logs"
        self.checkpoint_folder = "./checkpoints"
    self.log_id = ""

    # number of frames per segment during training / testing
    self.training_segment_frames = 27
    self.testing_segment_frames = 27

    # dictionary containing current hyper parameters
    self.params = {
        # augmentation
        "augmentation": True,
        "thermal_threshold": 10,
        "scale_frequency": 0.5,
        # dropout
        "keep_prob": 0.5,
        # training
        "batch_size": 16,
    }

    # labels this model can classify
    self.labels = []

    # used for tensorboard
    self.writer_train = None
    self.writer_val = None
    self.merged_summary = None

    # input shape: a single frame, or a full segment while training
    self.frame_count = 1
    if self.training:
        self.frame_count = self.training_segment_frames
def __init__(self, session=None):
    """Initialise model state; a TF session is created if none is given."""
    self.name = "model"
    self.session = session or tools.get_session()
    self.saver = None

    # Dataset container type (train / validation / test splits).
    self.datasets = namedtuple('Datasets', 'train, validation, test')

    # ------------------------------------------------------
    # Placeholders used to feed data into the graph.
    # ------------------------------------------------------
    self.X = None
    self.y = None
    self.keep_prob = None
    self.is_training = None
    self.global_step = None

    # ------------------------------------------------------
    # Graph nodes evaluated during training / inference.
    # ------------------------------------------------------
    self.prediction = None        # per-class probability distribution
    self.accuracy = None          # accuracy of batch
    self.loss = None              # total loss of batch
    self.train_op = None          # training operation
    self.novelty = None
    self.novelty_distance = None
    self.state_in = None
    self.state_out = None
    self.logits_out = None
    self.hidden_out = None
    self.lstm_out = None

    # Cached samples used to plot projections during training.
    self.train_samples = None
    self.val_samples = None

    # Evaluation sample count: 1000 works well but is slow,
    # 100 is accurate to within a few percent.
    self.eval_samples = 500
    # Report sample count: at least 1000 recommended for a
    # good representation.
    self.report_samples = 2000

    # How often to run an evaluation + print.
    self.print_every = 6000

    # Restore the best weights found during training rather than
    # the most recent ones.
    self.use_best_weights = True

    # Score this model got on its final evaluation.
    self.eval_score = None

    # Our current global step.
    self.step = 0

    # Parallel loading and training on data (much faster).
    self.enable_async_loading = True

    # Folder to write tensorboard logs to.
    self.log_dir = './logs'
    self.log_id = ''

    # Frames per segment during training / testing.
    self.training_segment_frames = 27
    self.testing_segment_frames = 27

    # Dictionary containing current hyper parameters.
    self.params = {
        # augmentation
        'augmentation': True,
        'thermal_threshold': 10,
        'scale_frequency': 0.5,
        # dropout
        'keep_prob': 0.5,
        # training
        'batch_size': 16,
    }

    # Labels this model can classify.
    self.labels = []

    # Used for tensorboard.
    self.writer_train = None
    self.writer_val = None
    self.merged_summary = None