def main():
    # Get parameters from arguments
    parser = argparse.ArgumentParser(description='Model training')
    parser.add_argument('-c', '--config_path', type=str,
                        default='./config/diwu_rematch.py',
                        help='Configuration file')
    arguments = parser.parse_args()
    assert arguments.config_path is not None, \
        'Please provide a path using -c config/pathname in the command line'

    # Start time
    print('\n > Start Time:')
    print('    ' + datetime.now().strftime('%a, %d %b %Y-%m-%d %H:%M:%S'))
    start_time = time.time()

    # Load configuration files
    configuration = Configuration(arguments.config_path)
    cf = configuration.load()
    configurationPATH(cf)

    # Train/test/predict with the network, depending on the configuration
    process(cf)

    # End time
    end_time = time.time()
    print('\n > End Time:')
    print('    ' + datetime.now().strftime('%a, %d %b %Y-%m-%d %H:%M:%S'))
    print('\n   ET: ' + HMS(end_time - start_time))
def main():
    # Define environment variables
    # Environment()

    # Get parameters from arguments
    parser = argparse.ArgumentParser(description='Model training')
    parser.add_argument('-c', '--config_path', type=str, default=None,
                        help='Configuration file')
    parser.add_argument('-e', '--exp_name', type=str, default=None,
                        help='Name of the experiment')
    parser.add_argument('-s', '--shared_path', type=str,
                        default='/data/module5',
                        help='Path to shared data folder')
    parser.add_argument('-l', '--local_path', type=str,
                        default='/home/master/sufav/results',
                        help='Path to local data folder')
    arguments = parser.parse_args()

    assert arguments.config_path is not None, \
        'Please provide a configuration path using -c config/pathname ' \
        'in the command line'
    assert arguments.exp_name is not None, \
        'Please provide a name for the experiment using -e name in the ' \
        'command line'

    # Define the user paths
    shared_path = arguments.shared_path
    local_path = arguments.local_path
    dataset_path = os.path.join(local_path, 'Datasets')
    shared_dataset_path = os.path.join(shared_path, 'Datasets')
    experiments_path = os.path.join(local_path, getuser(), 'Experiments')
    # Note: this should not be used
    shared_experiments_path = os.path.join(shared_path, getuser(),
                                           'Experiments')
    usr_path = os.path.join('/home/', getuser())

    # Load configuration files
    configuration = Configuration(arguments.config_path, arguments.exp_name,
                                  dataset_path, shared_dataset_path,
                                  experiments_path, shared_experiments_path,
                                  usr_path)
    cf = configuration.load()

    # Train/test/predict with the network, depending on the configuration
    process(cf)

    # Copy result to shared directory
    configuration.copy_to_shared()
def main():
    config = Configuration()
    if config.config.cmd == 'train':
        train_seg(config.config)
    elif config.config.cmd == 'test':
        test_seg(config.config)
def sendAlarm(self, abstand):
    config = Configuration()
    alert = AService.AlertService(self.pubsub)
    # Build the alert text as a single string
    message = ('Alarm: fell below the threshold of {} with {} {}'
               .format(alert.minimumdistance, float(abstand), alert.unit))
    self.client.publish(config.config["alert_topic"], message)
def _save_user_xml(self, random_password):
    user_xml = os.path.expanduser("~/.faraday/config/user.xml")
    if not os.path.exists(user_xml):
        shutil.copy(FARADAY_BASE_CONFIG_XML, user_xml)
    conf = Configuration(user_xml)
    conf.setAPIUrl('http://localhost:5985')
    conf.setAPIUsername('faraday')
    conf.setAPIPassword(random_password)
    conf.saveConfig()
def restore_defaults(self):
    reply = QtWidgets.QMessageBox.question(
        self, 'Message', 'Are you sure you want to restore the defaults?',
        QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
        QtWidgets.QMessageBox.No)
    if reply == QtWidgets.QMessageBox.Yes:
        self._tmp = Configuration()
        self._tmp.save()
        config.read()
        self.accept()
def update(self):
    microbars = [MicroBar(minimum=0, maximum=1)
                 for _ in Configuration().limits]
    out = 'Thermal Load: '
    for fan, headroom, microbar in zip(Configuration().fans,
                                       State().headrooms, microbars):
        try:
            load = str(round(1 - headroom, 2))[1:4].ljust(3, '0')
            bar = microbar.get_bar(value=1 - headroom)
            out += fan + ' '
            out += load + ' '
            out += bar + '   '
        except TypeError:
            # Skip fans without a numeric headroom reading
            pass
    print(out, end='\r')
def load(path):
    logger = Txt_file_logger()
    try:
        with open(path) as json_file:
            config = Configuration(**json.load(json_file))
    except OSError:
        logger.error(f"Can't load config {path}")
        return None  # without a readable file there is nothing to verify
    return Json_config_loader._verify(config)
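Since load() unpacks the parsed JSON directly into the Configuration constructor, the JSON's top-level keys must match the constructor's parameters. A minimal sketch of a compatible Configuration, assuming hypothetical host, port, and log_level fields (the real class is not shown in this snippet):

from dataclasses import dataclass


# Hypothetical stand-in: any class whose __init__ accepts the JSON's
# top-level keys as keyword arguments works with load() above.
@dataclass
class Configuration:
    host: str = "localhost"   # assumed field
    port: int = 1883          # assumed field
    log_level: str = "INFO"   # assumed field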
def setting_communications():
    try:
        print("before first request")
        cwd = os.path.dirname(os.path.abspath(__file__))
        Configuration(os.path.join(cwd, 'config/config.json'))
        config = Configuration.get()
        db = Mongo()
        pub = Publisher(config["AMQP"])
        mqttc = Mongo.get_mqttc()
        threading.Thread(target=mqttc_keep_alive, args=(mqttc,)).start()
    except Exception as e:
        print("log", str(e))
def appSpecificInit(self, conf: dict):
    """This method performs application-specific initialization
    for the Info application, at app creation time."""

    #----------------------------------------------------------
    # First, get the system configuration, because it contains
    # key information we need, such as the location of the AI's
    # data directory.  Note this retrieves the singleton
    # instance of the Configuration class.
    sysConf = Configuration()

    #------------------------------------------------------
    # Get the location of the AI's data directory, which is
    # in the system configuration.
    aiDataDir = sysConf.aiDataDir

    #-----------------------------------------------------
    # Next, we need to get the name of the info text file
    # (relative to that directory).  This comes from our
    # app-specific configuration data.
    infoFilename = conf['info-filename']

    #------------------------------------------------------
    # Next, we need to construct the full pathname of the
    # info text file.
    infoPathname = path.join(aiDataDir, infoFilename)

    #------------------------------------------------------
    # Next, we need to actually load the info text from the
    # appropriate data file in that directory.
    with open(infoPathname) as file:
        infoText = "\n" + file.read() + "\n"

    #--------------------------------------------------
    # Next, we size our window to exactly fit the text.
    self.window.nRows = countLines(infoText)

    #----------------------------------------------
    # Finally, we have our window display the text.
    self.window.addText(infoText)
def database_backup():
    """Copy db.json from the current path to the archive one,
    prepending the current datetime to the filename."""
    config = Configuration()
    today = datetime.datetime.today().isoformat()
    path_to_db = config.get_db_path()
    path_to_db_without_filename = os.path.dirname(path_to_db)
    path_to_copy = os.path.join(path_to_db_without_filename, "arch",
                                today + "_db.json")
    # If the archive path does not exist, create it
    if not os.path.exists(os.path.dirname(path_to_copy)):
        os.makedirs(os.path.dirname(path_to_copy))
    shutil.copy(path_to_db, path_to_copy)
def __init__(self, pubsub):
    config = Configuration()
    self.broker = config.config["broker_host"]
    self.port = config.config["broker_port"]
    logging.info("Connecting to MQTT broker %s:%s", self.broker, self.port)
    self.client = mqtt.Client()
    self.client.connect(self.broker, self.port)
    self.client.on_message = self.on_message
    self.client.subscribe(config.config["config_topic"], qos=1)
    self.client.loop_start()
    self.pubsub = pubsub
    self.pubsub.on_connect = self.on_connect
    self.pubsub.on_message = self.on_message
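The MQTT snippets in this section (this constructor, sendAlarm above, and publishData below) read a handful of keys out of config.config. A minimal sketch of such a configuration dict; all values are illustrative assumptions:

# Keys read by the MQTT snippets in this section; the values are
# placeholder assumptions, not taken from any real deployment.
example_config = {
    "broker_host": "localhost",
    "broker_port": 1883,
    "config_topic": "sensors/config",
    "data_topic": "sensors/data",
    "alert_topic": "sensors/alert",
}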
def __init__(self, parent=None):
    QtWidgets.QDialog.__init__(self, parent)

    # Temporary Configuration which saves the current changes
    self._tmp = Configuration()
    self._tmp.read()

    vbox = QtWidgets.QVBoxLayout()
    tab_widget = QtWidgets.QTabWidget()
    graphics_page = GraphicsSettingsPage(tab_widget, self._tmp)
    path_page = PathSettingsPage(tab_widget, self._tmp)
    comp_page = ComputationSettingsPage(tab_widget, self._tmp)
    self.cutoff_history_entries_for_deletion = \
        lambda: comp_page.tw_cutoff_history.history_entries_for_deletion
    self.cutoff_preset_entries_for_deletion = \
        lambda: comp_page.tw_cutoff_presets.preset_entries_for_deletion

    # Ok, Cancel and Restore defaults buttons
    ok = QtWidgets.QPushButton('Ok', self)
    cancel = QtWidgets.QPushButton('Cancel', self)
    restore_btn = QtWidgets.QPushButton('Restore defaults', self)
    ok.clicked.connect(self.ok)
    cancel.clicked.connect(self.cancel)
    restore_btn.clicked.connect(self.restore_defaults)

    tab_widget.addTab(graphics_page, 'graphics')
    tab_widget.addTab(path_page, 'path')
    tab_widget.addTab(comp_page, 'computation')
    vbox.addWidget(tab_widget)

    hbox = QtWidgets.QHBoxLayout()
    hbox.addStretch()
    hbox.addWidget(ok)
    hbox.addStretch()
    hbox.addWidget(cancel)
    hbox.addStretch()
    hbox.addWidget(restore_btn)
    hbox.addStretch()

    vbox.addLayout(hbox)
    self.setLayout(vbox)
    self.exec_()
def test_obd():
    config = Configuration()
    config.read('config/test_config.ini')
    assert config.obd_device == 'test_obd_device'
    assert config.rpm_interval == 0.8
    assert config.speed_interval == 0.9
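The test above assumes a config/test_config.ini fixture whose values match the three assertions. A minimal sketch that writes such a fixture with configparser; the 'obd' section name is an assumption, since the Configuration parser itself is not shown:

import configparser
import os

# Write a fixture matching the assertions in test_obd(); the 'obd'
# section name is a guess at how Configuration.read() maps sections
# and keys to attributes.
os.makedirs('config', exist_ok=True)
fixture = configparser.ConfigParser()
fixture['obd'] = {
    'obd_device': 'test_obd_device',
    'rpm_interval': '0.8',
    'speed_interval': '0.9',
}
with open('config/test_config.ini', 'w') as ini_file:
    fixture.write(ini_file)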
from config.configuration import Configuration
from datetime import datetime
from dateutil import parser
from models import EmployesModel
from helpers.mysql_alchemy import mysql_alchemy as db

configuration = Configuration()

# SQLAlchemy query model reference:
# https://docs.sqlalchemy.org/en/latest/orm/query.html


class EmployeServices(object):

    def __init__(self, **kwargs):
        pass

    def create(self, json: dict):
        try:
            # Take the incoming JSON payload as the record data
            json_send = json

            # Prepare the data model
            result = EmployesModel(**json_send)

            # Execute against the database
            db.session.add(result)
            db.session.commit()
            # check if exist
def log_configuration():
    return Configuration(configuration.get_not_none_property("log"))
def main():
    start_time = time.time()

    # Prepare configuration
    print('Loading configuration ...')
    config = Configuration()
    cf = config.Load()

    # Enable log file
    logger_debug = Logger(cf.log_file_debug)
    logger_debug.write('\n ---------- Init experiment: ' + cf.exp_name +
                       ' ---------- \n')

    # Model building
    logger_debug.write('- Building model: ' + cf.model_name + ' <--- ')
    model = Model_builder(cf)
    model.build()

    # Problem type
    if cf.problem_type == 'segmentation':
        problem_manager = SemanticSegmentation_Manager(cf, model)
    elif cf.problem_type == 'classification':
        problem_manager = Classification_Manager(cf, model)
    elif cf.problem_type == 'detection':
        problem_manager = Detection_Manager(cf, model)
    else:
        raise ValueError('Unknown problem type')

    # Create dataloader builder
    dataloader = Dataloader_Builder(cf, model)

    if cf.train:
        model.net.train()  # enable dropout modules and others
        train_time = time.time()
        # Dataloaders
        logger_debug.write('\n- Reading Train dataset: ')
        dataloader.build_train()
        if cf.valid_images_txt is not None and cf.valid_gt_txt is not None \
                and cf.valid_samples_epoch != 0:
            logger_debug.write('\n- Reading Validation dataset: ')
            dataloader.build_valid(cf.valid_samples_epoch, cf.valid_images_txt,
                                   cf.valid_gt_txt, cf.resize_image_valid,
                                   cf.valid_batch_size)
            problem_manager.trainer.start(dataloader.train_loader,
                                          dataloader.train_set,
                                          dataloader.loader_set,
                                          dataloader.loader)
        else:
            # Train without validation inside epoch
            problem_manager.trainer.start(dataloader.train_loader,
                                          dataloader.train_set)
        train_time = time.time() - train_time
        logger_debug.write('\t Train step finished: %ds ' % train_time)

    if cf.validation:
        valid_time = time.time()
        model.net.eval()
        if not cf.train:
            logger_debug.write('- Reading Validation dataset: ')
            dataloader.build_valid(cf.valid_samples, cf.valid_images_txt,
                                   cf.valid_gt_txt, cf.resize_image_valid,
                                   cf.valid_batch_size)
        else:
            # If the validation dataloader was used during training, only
            # update the total number of images to take
            dataloader.loader_set.update_indexes(cf.valid_samples,
                                                 valid=True)  # valid=True avoids shuffle
        logger_debug.write('\n- Starting validation <---')
        problem_manager.validator.start(dataloader.loader_set,
                                        dataloader.loader, 'Validation')
        valid_time = time.time() - valid_time
        logger_debug.write('\t Validation step finished: %ds ' % valid_time)

    if cf.test:
        model.net.eval()
        test_time = time.time()
        logger_debug.write('\n- Reading Test dataset: ')
        dataloader.build_valid(cf.test_samples, cf.test_images_txt,
                               cf.test_gt_txt, cf.resize_image_test,
                               cf.test_batch_size)
        logger_debug.write('\n - Starting test <---')
        problem_manager.validator.start(dataloader.loader_set,
                                        dataloader.loader, 'Test')
        test_time = time.time() - test_time
        logger_debug.write('\t Test step finished: %ds ' % test_time)

    if cf.predict_test:
        model.net.eval()
        pred_time = time.time()
        logger_debug.write('\n- Reading Prediction dataset: ')
        dataloader.build_predict()
        logger_debug.write('\n - Generating predictions <---')
        problem_manager.predictor.start(dataloader.predict_loader)
        pred_time = time.time() - pred_time
        logger_debug.write('\t Prediction step finished: %ds ' % pred_time)

    total_time = time.time() - start_time
    logger_debug.write('\n- Experiment finished: %ds ' % total_time)
    logger_debug.write('\n')
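For orientation, the pipeline above only reads a fixed set of attributes off the loaded cf object. A minimal illustrative stand-in that would drive just the training branch, with every value an assumption:

# Illustrative stand-in for the object returned by config.Load();
# only the attributes the control flow above reads are shown.
class ExampleConfig:
    exp_name = 'demo'
    model_name = 'fcn8'            # assumed model name
    problem_type = 'segmentation'  # 'classification' or 'detection' also valid
    log_file_debug = 'logs/debug.txt'
    train = True
    validation = False
    test = False
    predict_test = False
    valid_images_txt = None
    valid_gt_txt = None
    valid_samples_epoch = 0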
def main():
    start_time = time.time()

    # Input arguments
    parser = argparse.ArgumentParser(
        description="TensorFlow framework for Semantic Segmentation")
    parser.add_argument("--config_file", type=str,
                        default='config/configFile.py',
                        help="configuration file path")
    parser.add_argument("--exp_name", type=str, default='Sample',
                        help="Experiment name")
    parser.add_argument("--exp_folder", type=str,
                        default='/home/jlgomez/Experiments/',
                        help="Experiment folder path")
    args = parser.parse_args()

    # Prepare configuration
    print('Loading configuration ...')
    config = Configuration(args.config_file, args.exp_name, args.exp_folder)
    cf = config.Load()

    # Enable log file
    logger_debug = Logger(cf.log_file_debug)
    logger_debug.write('\n ---------- Init experiment: ' + cf.exp_name +
                       ' ---------- \n')

    # Model building
    logger_debug.write('- Building model: ' + cf.model_name + ' <--- ')
    model = Model_builder(cf)
    model.build()

    # Problem type
    if cf.problem_type == 'segmentation':
        problem_manager = SemanticSegmentation_Manager(cf, model)
    elif cf.problem_type == 'classification':
        problem_manager = Classification_Manager(cf, model)
    else:
        raise ValueError('Unknown problem type')

    # Loss definition
    criterion = Loss_Builder(cf).build().cuda()

    # Optimizer definition
    optimizer = Optimizer_builder().build(cf, model.net)

    # Learning rate scheduler
    scheduler = scheduler_builder().build(cf, optimizer)

    # Create dataloader builder
    dataloader = Dataloader_Builder(cf)

    if cf.train:
        model.net.train()  # enable dropout modules and others
        train_time = time.time()
        # Dataloaders
        logger_debug.write('\n- Reading Train dataset: ')
        dataloader.build_train()
        if cf.valid_images_txt is not None and cf.valid_gt_txt is not None \
                and cf.valid_samples_epoch != 0:
            logger_debug.write('\n- Reading Validation dataset: ')
            dataloader.build_valid(cf.valid_samples_epoch, cf.valid_images_txt,
                                   cf.valid_gt_txt, cf.resize_image_valid,
                                   cf.valid_batch_size)
            problem_manager.trainer.start(criterion, optimizer,
                                          dataloader.train_loader,
                                          dataloader.train_set,
                                          dataloader.loader_set,
                                          dataloader.loader, scheduler)
        else:
            # Train without validation inside epoch
            problem_manager.trainer.start(criterion, optimizer,
                                          dataloader.train_loader,
                                          dataloader.train_set,
                                          scheduler=scheduler)
        train_time = time.time() - train_time
        logger_debug.write('\t Train step finished: %ds ' % train_time)

    if cf.validation:
        valid_time = time.time()
        model.net.eval()
        if not cf.train:
            logger_debug.write('- Reading Validation dataset: ')
            dataloader.build_valid(cf.valid_samples, cf.valid_images_txt,
                                   cf.valid_gt_txt, cf.resize_image_valid,
                                   cf.valid_batch_size)
        else:
            # If the validation dataloader was used during training, only
            # update the total number of images to take
            dataloader.loader_set.update_indexes(cf.valid_samples,
                                                 valid=True)  # valid=True avoids shuffle
        logger_debug.write('\n- Starting validation <---')
        problem_manager.validator.start(criterion, dataloader.loader_set,
                                        dataloader.loader)
        valid_time = time.time() - valid_time
        logger_debug.write('\t Validation step finished: %ds ' % valid_time)

    if cf.test:
        model.net.eval()
        test_time = time.time()
        logger_debug.write('\n- Reading Test dataset: ')
        dataloader.build_valid(cf.test_samples, cf.test_images_txt,
                               cf.test_gt_txt, cf.resize_image_test,
                               cf.test_batch_size)
        logger_debug.write('\n - Starting test <---')
        problem_manager.validator.start(criterion, dataloader.loader_set,
                                        dataloader.loader)
        test_time = time.time() - test_time
        logger_debug.write('\t Test step finished: %ds ' % test_time)

    if cf.predict_test:
        model.net.eval()
        pred_time = time.time()
        logger_debug.write('\n- Reading Prediction dataset: ')
        dataloader.build_predict()
        logger_debug.write('\n - Generating predictions <---')
        problem_manager.predictor.start(dataloader.predict_loader)
        pred_time = time.time() - pred_time
        logger_debug.write('\t Prediction step finished: %ds ' % pred_time)

    total_time = time.time() - start_time
    logger_debug.write('\n- Experiment finished: %ds ' % total_time)
    logger_debug.write('\n')
import smtplib
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from config.configuration import Configuration

# TODO: add a check whether the email was received by the client

# Initializing config variables
config = Configuration().config
PORT = config['mailing']['port']
SMTP_SERVER = config['mailing']['smtp_server']
SENDER_EMAIL = config['mailing']['sender_email']
PASSWORD = config['mailing']['password']


def mail(recipient_email, message=None):
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL(SMTP_SERVER, PORT, context=context) as server:
        server.login(SENDER_EMAIL, PASSWORD)
        server.sendmail(SENDER_EMAIL, recipient_email, message.as_string())


def mailOTP(session, otp):
    recipient_email = session['email']
    recipient_name = session['name']
    message = MIMEMultipart('alternative')
    message['Subject'] = 'OTP - SCC Website'
    message['From'] = SENDER_EMAIL
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Model training')
    parser.add_argument('-c', '--config_path', type=str, default=None,
                        help='Configuration file')
    parser.add_argument('-a', '--action', type=str, default=None,
                        help='train or test')
    args = parser.parse_args()

    cf = Configuration(args.config_path).load()

    if args.action == 'train':
        model = Unet(n_class=cf.num_classes, dropout=cf.dropout,
                     batch_norm=True).get_unet()
        if cf.input_weights is not None:
            model.load_weights(cf.input_weights)
        only_parasite_generator = UNetGeneratorClass(
            n_class=cf.num_classes,    # read from the config; argparse
            batch_size=cf.batch_size,  # defines only -c and -a here
            apply_augmentation=cf.parasite_augmentation,
            sampling_score=cf.parasite_score,
            data_path=cf.train_data_path,
def main():
    # Define environment variables
    # Environment()

    # Get parameters from arguments
    parser = argparse.ArgumentParser(description='Model training')
    parser.add_argument('-c', '--config_path', type=str, default=None,
                        help='Configuration file')
    parser.add_argument('-e', '--exp_name', type=str, default=None,
                        help='Name of the experiment')
    parser.add_argument('-s', '--shared_path', type=str, default='/data',
                        help='Path to shared data folder')
    parser.add_argument('-l', '--local_path', type=str, default='/datatmp',
                        help='Path to local data folder')
    arguments = parser.parse_args()

    assert arguments.config_path is not None, \
        'Please provide a configuration path using -c config/pathname ' \
        'in the command line'
    assert arguments.exp_name is not None, \
        'Please provide a name for the experiment using -e name in the ' \
        'command line'

    # Start time
    print('\n > Start Time:')
    print('    ' + datetime.now().strftime('%a, %d %b %Y-%m-%d %H:%M:%S'))
    start_time = time.time()

    # Define the user paths
    shared_path = arguments.shared_path
    local_path = arguments.local_path
    dataset_path = os.path.join(local_path, 'Datasets')
    shared_dataset_path = os.path.join(shared_path, 'Datasets')
    experiments_path = os.path.join(local_path, 'Experiments')
    shared_experiments_path = os.path.join(shared_path, 'Experiments')
    usr_path = os.path.join('/home/', getuser())

    # Load configuration files
    configuration = Configuration(arguments.config_path, arguments.exp_name,
                                  dataset_path, shared_dataset_path,
                                  experiments_path, shared_experiments_path,
                                  usr_path)
    cf = configuration.load()
    configurationPATH(cf, dataset_path)

    # Train/test/predict with the network, depending on the configuration
    process(cf)

    # Copy result to shared directory
    # configuration.copy_to_shared()

    # End time
    end_time = time.time()
    print('\n > End Time:')
    print('    ' + datetime.now().strftime('%a, %d %b %Y-%m-%d %H:%M:%S'))
    print('\n   ET: ' + HMS(end_time - start_time))  # -> H:M:S
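A hypothetical invocation of this entry point; the script name, config path, and experiment name are illustrative:

    python train.py -c config/my_config.py -e my_experiment -s /data -l /datatmp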
    else:
        raise ValueError('Unknown model')
    return model, optimizer


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Model training')
    parser.add_argument('-c', '--config_path', type=str, default=None,
                        help='Configuration file')
    parser.add_argument('-e', '--experiment_num', type=int, default=None,
                        help='Experiment number')
    parser.add_argument('-k', '--kfold', type=int, default=None,
                        help='K-fold index')
    parser.add_argument('-a', '--action', type=str, default=None,
                        help='bbox, centered_crop, divide, train or test')
    args = parser.parse_args()

    cf = Configuration(args.config_path, args.action).load()

    # if args.experiment_num is not None:
    #     print("number of experiment: ", args.experiment_num)
    # TODO: handle the option for a particular train or test

    # Generate images from the bounding boxes of each corresponding mask
    if args.action == 'bbox':
        print("bounding box...")
        bbox = BBox()
        bbox.load(cf)
        bbox.make(cf)

    if args.action == 'centered_crop':
def main():
    start_time = time.time()

    # Input arguments
    parser = argparse.ArgumentParser(
        description="TensorFlow framework for Semantic Segmentation")
    parser.add_argument("--config_file", type=str,
                        default='config/configFile.py',
                        help="configuration file path")
    parser.add_argument("--exp_name", type=str, default='Sample',
                        help="Experiment name")
    parser.add_argument("--exp_folder", type=str,
                        default='/home/jlgomez/Experiments/DenseNetFCN/',
                        help="Experiment folder path")
    args = parser.parse_args()

    # Prepare configuration
    print('Loading configuration ...')
    config = Configuration(args.config_file, args.exp_name, args.exp_folder)
    cf = config.Load()

    sess = tf.Session()

    # Training step
    if cf.train:
        # Create symbol builder with all the parameters needed
        # (model, loss, optimizers, ...)
        sb = Symbol_Builder(cf, cf.size_image_train, sess)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # saver, sb = restore_session(cf, sess, sb)
        # Merge all the previous summaries
        sb.tensorBoard.set_up()
        print('Starting training ...')
        Train(cf, sess, sb)

    # Validation step
    if cf.validation:
        if not cf.train:
            sb = Symbol_Builder(cf, cf.size_image_valid, sess)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            # saver, sb = restore_session(cf, sess, sb)
            # Merge all the previous summaries
            sb.tensorBoard.set_up()
        print('Starting validation ...')
        Validation(cf, sess, sb)

    # Test step
    if cf.test:
        if not cf.train and not cf.validation:
            sb = Symbol_Builder(cf, cf.size_image_test, sess)
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            # saver, sb = restore_session(cf, sess, sb)
            # Merge all the previous summaries
            sb.tensorBoard.set_up()
        print('Starting testing ...')
        if cf.predict_test:
            Predict(cf, sess, sb)
        else:
            Test(cf, sess, sb)

    total_time = time.time() - start_time
    print(' Experiment finished: %ds ' % total_time)
def publishData(self, data):
    # 'data' rather than 'dict' to avoid shadowing the builtin
    config = Configuration()
    self.client.publish(config.config["data_topic"], json.dumps(data))