def __init__(self):
    GPIO.setup(self.binary_output_pins, GPIO.OUT)
    GPIO.setup(self.mux_enable_output_pins, GPIO.OUT)

    self.logger = Logger()
    # Reset all four latch outputs low before opening anything
    self.__set_latch(self.binary_output_pins[0], 0)
    self.__set_latch(self.binary_output_pins[1], 0)
    self.__set_latch(self.binary_output_pins[2], 0)
    self.__set_latch(self.binary_output_pins[3], 0)
    self.close_boxes()
Example #2
    def __init__(self):
        load_dotenv()
        Logger()

        self.logger = logging.getLogger("logger")
        self.username = os.getenv("USERNAME")
        self.password = os.getenv("PASSWORD")
        self.driverPath = os.getenv("CHROMEDRIVER_PATH")

        self.configureDriver()
Example #3
    def __init__(self, file_path, schema_path, process):
        self._file_path = file_path
        self._schema_path = schema_path
        self._time_created = datetime.now()
        self._logger = Logger(
            f'validation_{process}_logs_{self._time_created.date()}_{self._time_created.strftime("%H%M%S")}.log'
        )
        self._path_to_good_files = None
        self._path_to_bad_files = None
        self.process = process
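For reference, the log filename generated above looks like validation_<process>_logs_2021-01-22_134501.log; the date and time shown are only illustrative.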
Example #4
    def dilate_file(self, img, intensity):
        """
        Apply a dilation filter to the image
        :param img: The image as a NumPy array
        :param intensity: The intensity of the filter (number of dilation iterations)
        :return: The dilated image
        """
        logger = Logger()
        kernel = np.ones((20, 20), np.uint8)  # 20x20 structuring element
        logger.log(" Applying dilation filter...")
        return cv2.dilate(img, kernel, iterations=intensity)
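A minimal usage sketch for dilate_file, assuming it is a method on an image-filter class; the Filters class name and file paths below are hypothetical:

import cv2

img = cv2.imread("input.png")                      # load the image as a NumPy array
dilated = Filters().dilate_file(img, intensity=2)  # two dilation iterations
cv2.imwrite("dilated.png", dilated)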
Example #5
    def __init__(self, lighting, demo_mode=False):
        self.logger = Logger()
        #self.demo_mode = demo_mode
        # For now, let's leave it as only counting one coin.
        self.demo_mode = True
        self.lighting = lighting
        GPIO.setup(self.coin_counter_pins, GPIO.OUT)
        GPIO.setup(self.coin_input_pin, GPIO.IN)
        GPIO.setup(self.coin_counter_input_pin, GPIO.IN)
        self.__set_coin_count(0)
        self.start_waiting_for_coin()
Example #6
def get_is_admin(user_id):
    query_result = select(("Is_Admin", ), "Users", False, "WHERE User_ID=?",
                          (user_id, ))
    if query_result is None:
        #raise Exception("Invalid query attempted to run.")
        # TODO: this should throw an exception, but for now it will just be logged
        Logger().error("Invalid query attempted to run")
    elif query_result['Is_Admin'] > 0:
        return True

    return False
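A hypothetical call site for get_is_admin; current_user_id, show_admin_panel and deny_access are placeholders:

if get_is_admin(current_user_id):
    show_admin_panel()
else:
    deny_access()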
Example #7
	def __init__(self, working_dir, log_dir, compare_file, parameter_format, logger=None):
		"""
		parameter_format specifies which parameters are taken from which
		files, as a list of [file, name, min, max, step] entries.
		"""
		if logger is None:
			self.log = Logger()
		else:
			self.log = logger
		self.error = False
		self.working_dir = working_dir
		self.log_dir = log_dir
		self.kiva_path = '../ext'
		self.kiva_name = 'kiva_0D'
		self.kiva_parameter_files = ['itape17', 'itape5', 'itapeERC', 'itapeRs']
		self.parameter_format = parameter_format
		self.log.debug("Parameter Format: %s" % self.parameter_format)
		# this will later hold all kiva_parameter_files that
		# need to be loaded as ITapeFile objects
		self.itapes = {}
		self.compare_file = compare_file
		# Check that the working directory exists and that it is either
		# empty or already marked as a kivagen working directory
		self.working_dir_indicator = os.path.join(self.working_dir, 'kivagen.working.dir')
		if not os.path.isdir(self.working_dir):
			self.log.error("'%s' does not exist." % self.working_dir)
			self.error = True
			self.working_dir = None
		elif os.listdir(self.working_dir):	# not empty
			if not os.path.isfile(self.working_dir_indicator):
				self.log.error("'%s' does not seem to be a working directory." % self.working_dir)
				self.error = True
				self.working_dir = None
				return
		else:
			open(self.working_dir_indicator, 'a').close()
		# Check that log dir exists
		if not os.path.isdir(self.log_dir):
			self.log.error("'%s' does not exist." % self.log_dir)
			self.error = True
			self.log_dir = None
		# Check if input files exist
		input_files = [self.compare_file]
		input_files.append(os.path.join(self.kiva_path, self.kiva_name))
		for parameter_file in self.kiva_parameter_files:
			input_files.append(os.path.join(self.kiva_path, parameter_file))
		for input_file in input_files:
			if not os.path.isfile(input_file):
				self.log.error("'%s' not found." % input_file)
				self.error = True
		# Parse compare file
		self.compare_values = self._readCompareFile(self.compare_file)
		# self.log.debug("self.compare_values = %s" % self.compare_values)
		# Load Parameters
		self._loadParameters()
Example #8
    def __init__(self, jobDict=None):
        self.__name__ = "Bob"
        self.log = Logger(self.__name__)
        self.status = "OPEN"
        if jobDict:
            self.id = jobDict["id"]
            self.startTime = dt.strptime(jobDict["start"], "%H%M %Z").time()
            self.endTime = dt.strptime(jobDict["end"], "%H%M %Z").time()
            self.command = jobDict["command"]
            self.priority = jobDict["priority"]
            self.notify = jobDict["notify"]
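A sketch of the jobDict this constructor expects, inferred from the keys it reads; Job stands in for whatever class defines the __init__ above and every value is made up:

job = Job({
    "id": 1,
    "start": "0900 UTC",  # parsed with "%H%M %Z"
    "end": "1730 UTC",
    "command": "backup.sh",
    "priority": 5,
    "notify": True,
})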
Example #9
    def __init__(self):
        self.__CONFIG = "../config/config.config"
        self.__APP = FastAPI()
        self.__HOST = ''
        self.__PORT = ''
        self.__LOGIN = Login(username='', password='')
        self.__APP.add_middleware(CORSMiddleware,
                                  allow_origins=["*"],
                                  allow_methods=["*"],
                                  allow_headers=["*"])
        self.__get_config()
        self.__LOGGER = Logger('restlin')
Example #10
def make_user_admin():
    if not is_admin():
        return not_admin_redirect()

    if request.method != 'POST':
        return '400'

    user_id = get_request_field_data('user_id')

    update_query.change_user_admin_status(user_id, True)
    Logger().log("User with id=" + str(user_id) + " is now admin")

    return '200'
Example #11
    def blur_image(self, img, intensity):
        """
        Apply a blur filter to the image
        :param img: The image as a NumPy array
        :param intensity: The intensity of the filter (kernel size)
        :return: The blurred image
        """
        # GaussianBlur requires an odd kernel size, so bump even values by one
        if intensity % 2 == 0:
            intensity += 1
        log = Logger()
        log.log(" Applying a blur filter...")
        # Blur the image with a Gaussian filter
        return cv2.GaussianBlur(img, (intensity, intensity), 0)
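cv2.GaussianBlur only accepts odd kernel sizes, which is why the guard above bumps an even intensity by one. A quick hypothetical call (Filters again stands in for the owning class):

blurred = Filters().blur_image(img, intensity=4)  # runs as a 5x5 Gaussian blur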
Example #12
    def __init__(self, **kwargs):
        self.__logger = Logger()
        if 'lang_id' in kwargs:
            self._lang_id = kwargs['lang_id']
        else:
            raise KeyError('lang_id')
        if 'path' in kwargs:
            # Strip a trailing slash so paths are stored consistently
            if kwargs['path'][-1] == '/':
                self._path = kwargs['path'][:-1]
            else:
                self._path = kwargs['path']
            self.__lang_dir_path = self.__make_lang_dir(self._path)
Example #13
def main(config, resume):
    train_logger = Logger()

    # load data
    train_dataloader = ICDARDataLoader(config).train()

    # initialize the model
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(i) for i in config['gpus']])
    model = Model(config)
    model.summary()

    loss = Loss()
    trainer = Trainer(model, loss, resume, config, train_dataloader, train_logger)
    trainer.train()
Example #14
    def forbid_root(logger=None):
        '''
        Target:
            - stop the execution of the program if this is being run by "root".
        '''
        if not logger:
            logger = Logger()
        try:
            if getuser() == 'root':  # Get system username
                raise Exception()
        except Exception as e:
            logger.debug('Error in function "forbid_root": {}.'.format(
                str(e)))
            logger.stop_exe(Messenger.ROOT_NOT_ALLOWED)
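A hedged sketch of where forbid_root would typically be called, at the top of an entry point; main is a placeholder:

if __name__ == '__main__':
    forbid_root()  # abort immediately when the process runs as root
    main()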
Example #15
    def __init__(self,
                 connecter=None,
                 new_dbname='',
                 original_dbname='',
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        # First check whether the name of the copy already exists in PostgreSQL
        self.connecter.cursor.execute(Queries.PG_DB_EXISTS, (new_dbname, ))
        # Do not replicate if the name already exists
        result = self.connecter.cursor.fetchone()
        if result:
            msg = Msg.DB_ALREADY_EXISTS.format(dbname=new_dbname)
            self.logger.stop_exe(msg)

        if new_dbname:
            self.new_dbname = new_dbname
        else:
            self.logger.stop_exe(Msg.NO_NEW_DBNAME)

        # First check whether the name of the source exists in PostgreSQL
        self.connecter.cursor.execute(Queries.PG_DB_EXISTS,
                                      (original_dbname, ))
        result = self.connecter.cursor.fetchone()
        if not result:
            msg = Msg.DB_DOES_NOT_EXIST.format(dbname=original_dbname)
            self.logger.stop_exe(msg)

        if original_dbname:
            self.original_dbname = original_dbname
        else:
            self.logger.stop_exe(Msg.NO_ORIGINAL_DBNAME)

        msg = Msg.REPLICATOR_VARS.format(server=self.connecter.server,
                                         user=self.connecter.user,
                                         port=self.connecter.port,
                                         original_dbname=self.original_dbname,
                                         new_dbname=self.new_dbname)
        self.logger.debug(Msg.REPLICATOR_VARS_INTRO)
        self.logger.debug(msg)
Example #16
def main():
    # TODO: read these values from command-line arguments
    data_folder = "dataset/data/cifar-10"
    input_folder = data_folder + "/train"
    label_file = data_folder + "/trainLabels.csv"
    batch_size = 128

    dataset = Dataset(data_folder, input_folder, label_file, True)

    TrainingSet = DataIterator(dataset.train_x, dataset.train_y, batch_size)
    TestSet = DataIterator(dataset.test_x, dataset.test_y, batch_size)

    config = {
        "learning_rate": 0.001,
        "other": 1,
        "checkpoint_path": "checkpoint/test",
        "checkpoint_name": "test",
        "summary_path": "graphs/test",
        "training": False,
        "epochs": 55
    }
    try:
        create_directories(["checkpoint", "graphs"])
    except Exception:
        print("Cannot create directory")
    tf.reset_default_graph()

    model = TestModel(config)
    model.build_model()

    device = Device()
    session = device.get_session()

    session.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    if config["training"]:
        model.restore_model(saver, session)

        logger = Logger(session, config)
        trainer = Train(session, model, saver, TrainingSet, TestSet, logger)

        trainer.train(config["epochs"])
    else:
        inference = Inference(session, model, config, saver)
        inference.load_from_saved_model(saver)

        prediction = inference.prediction(TestSet)
        print(prediction)
Example #17
    def __init__(self,
                 data_name="ml-1m",
                 model_name="SASRec",
                 min_user_number=5,
                 min_item_number=5):
        super(abstract_model, self).__init__()
        self.data_name = data_name
        self.model_name = model_name
        self.min_user_number = min_user_number
        self.min_item_number = min_item_number

        road = os.path.abspath(os.getcwd())
        localtime = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        # Use os.path.join instead of a backslash-laden format string
        # so the log path works on any platform
        self.logger = Logger(os.path.join(road, "log", self.model_name,
                                          str(localtime)))
Example #18
    def __init__(self):
        GPIO.cleanup()
        GPIO.setmode(GPIO.BOARD)
        self.logger = Logger()
        self.__init_pins()
        self.box_controller = BinaryBoxController()
        self.printer = Printer()
        LightSystemManager.setup()
        # deprecated: superseded by LightSystemManager
        self.lighting = LightingController()
        self.adventure_knob_a = BinaryKnob(self.box_select_pins_a)
        self.adventure_knob_b = BinaryKnob(self.box_select_pins_b)
        self.coin_machine = CoinMachine(self.lighting, self.demo_mode)
        self.server = api.run.ServerController()
        self.api = api.run
Example #19
    def __init__(self, logger_desired_headers=None, use_rtg=True):

        # Enable or disable each tool based on parameter choice

        if not logger_desired_headers:
            logging.critical("Splitter: Logger disabled.")
            self.logger = None
        else:
            self.logger = Logger(logger_desired_headers)

        if not use_rtg:
            logging.warning("Splitter: RTG Disabled.")
            self.ipc = None
        else:
            self.ipc = IPC()
Example #20
    def get_filtered_dbnames(dbs_all, in_dbs=[], ex_dbs=[], in_regex='',
                             ex_regex='', in_priority=False, logger=None):
        '''
        Target:
            - filter a list of databases' names taking into account inclusion
              and exclusion parameters and their priority.
        Parameters:
            - dbs_all: list to filter.
            - in_dbs: list with the databases' names to include.
            - ex_dbs: list with the databases' names to exclude.
            - in_regex: regular expression which indicates the databases' names
              to include.
            - ex_regex: regular expression which indicates the databases' names
              to exclude.
            - in_priority: a flag which determinates if the inclusion
              parameters must predominate over the exclusion ones.
            - logger: a logger to show and log some messages.
        Return:
            - a filtered list (subset of "dbs_all").
        '''
        if not logger:
            logger = Logger()

        bkp_list = []

        if in_priority:  # If inclusion is over exclusion
            # Apply exclusion first and then inclusion
            bkp_list = DbSelector.dbname_filter_exclude(dbs_all, ex_dbs,
                                                        ex_regex, logger)
            bkp_list = DbSelector.dbname_filter_include(bkp_list, in_dbs,
                                                        in_regex, logger)
        else:
            # Apply inclusion first and then exclusion
            bkp_list = DbSelector.dbname_filter_include(dbs_all, in_dbs,
                                                        in_regex, logger)
            bkp_list = DbSelector.dbname_filter_exclude(bkp_list, ex_dbs,
                                                        ex_regex, logger)

        logger.highlight('info', Messenger.SEARCHING_SELECTED_DBS, 'white')

        if bkp_list == []:
            logger.highlight('warning', Messenger.EMPTY_DBNAME_LIST, 'yellow',
                             effect='bold')
        else:
            for dbname in bkp_list:
                logger.info(Messenger.SELECTED_DB.format(dbname=dbname))
        return bkp_list
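A hypothetical call based on the docstring's description of in_priority: when it is True, the inclusion parameters predominate, so a database named in both lists is kept:

kept = DbSelector.get_filtered_dbnames(['app_prod', 'app_test', 'legacy'],
                                       in_dbs=['app_test'],
                                       ex_dbs=['app_test'],
                                       in_priority=True)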
Example #21
    def __init__(self,
                 connecter=None,
                 in_dbs=[],
                 old_role='',
                 new_role='',
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        if isinstance(in_dbs, list):
            self.in_dbs = in_dbs
        else:
            self.in_dbs = Casting.str_to_list(in_dbs)

        if old_role:
            self.old_role = old_role
        else:
            self.logger.stop_exe(Msg.NO_OLD_ROLE)

        if not new_role:
            self.logger.stop_exe(Msg.NO_NEW_ROLE)
        # First check whether the user exists in PostgreSQL or not
        self.connecter.cursor.execute(Queries.PG_USER_EXISTS, (new_role, ))
        # Do not alter database if the user does not exist
        result = self.connecter.cursor.fetchone()
        if result:
            self.new_role = new_role
        else:
            msg = Msg.USER_DOES_NOT_EXIST.format(user=new_role)
            self.logger.stop_exe(msg)

        msg = Msg.ALTERER_VARS.format(server=self.connecter.server,
                                      user=self.connecter.user,
                                      port=self.connecter.port,
                                      in_dbs=self.in_dbs,
                                      old_role=self.old_role,
                                      new_role=self.new_role)
        self.logger.debug(Msg.ALTERER_VARS_INTRO)
        self.logger.debug(msg)
Example #22
def main(args):
    logger = Logger()
    if args.dataset != 'func':
        loss = cross_entropy_loss
        metrics = [accuracy]
        if args.arch[:6] == 'deeper':
            model = eval(args.arch[:6].title() + args.dataset.title() + 'CNN')(
                int(args.arch[6:]))
            identifier = type(
                model).__name__ + args.arch[6:] + '_' + args.dataset + '_'
        else:
            model = eval(args.arch.title() + args.dataset.title() + 'CNN')()
            identifier = type(model).__name__ + '_' + args.dataset + '_'
        data_loader = eval(args.dataset.title() + 'Loader')(args.batch_size,
                                                            args.rand_label,
                                                            args.noise)
    else:
        loss = mse_loss
        metrics = []
        model = eval(args.arch.title() + 'FC')()
        data_loader = FunctionDataLoader(args.target_func,
                                         batch_size=args.batch_size,
                                         n_sample=1024,
                                         x_range=(0, 1))
        identifier = type(model).__name__ + '_' + args.target_func + '_'

    model.summary()
    optimizer = optim.Adam(model.parameters())
    data_loader, valid_data_loader = split_validation(data_loader,
                                                      args.validation_split)
    trainer = Trainer(model,
                      loss,
                      metrics,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      optimizer=optimizer,
                      epochs=args.epochs,
                      logger=logger,
                      save_dir=args.save_dir,
                      save_freq=args.save_freq,
                      resume=args.resume,
                      verbosity=args.verbosity,
                      identifier=identifier,
                      with_cuda=not args.no_cuda,
                      save_grad=args.save_grad)
    trainer.train()
    print(logger)
Example #23
def dependancy_check(dependancy_list):
    """Check every dependency and report any that are missing."""
    log = Logger(stdout)

    # Store missing modules.
    missing_modules = list()

    for dependancy in dependancy_list:
        try:
            find_module(dependancy)
        except ImportError:
            # Add the missing module to the return list.
            missing_modules.append(dependancy)

            # Inform the user about the missing module.
            log.critical("Module unavailable : %s (%s)" %
                         (dependancy, dependancy_list[dependancy]))

    return missing_modules
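A hypothetical dependancy_list for the checker above; since the loop indexes it as dependancy_list[dependancy], a dict mapping module names to short descriptions fits:

required = {
    "numpy": "array computations",
    "cv2": "OpenCV image processing",
}
missing = dependancy_check(required)  # names that failed to import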
Example #24
def main():
    l = Logger(__name__)
    osqonnector = Bottle()

    for app in INSTALLED_APPS:
        l.debug("loading {}".format(app.__name__))
        osqonnector.merge(app.app)

    l.debug("[{}]: ready to serv ({}:{})".format(getpid(), config.HOST,
                                                 config.PORT))
    try:
        bjoern.run(osqonnector,
                   config.HOST,
                   config.PORT,
                   reuse_port=config.REUSE_PORT)
    except KeyboardInterrupt:
        l.info("bye.")
Example #25
    def __init__(self, training_db_path):
        self._training_db_path = training_db_path
        self._time_created = datetime.now()
        self._logger = Logger(
            f'training_logs_{self._time_created.date()}_{self._time_created.strftime("%H%M%S")}.log'
        )
        date = self._time_created.date()
        time = self._time_created.strftime("%H%M%S")
        # self._db_handler = SqliteDbHandler(self._logger,self._training_db_path,f'training_db_{date}_{time}')
        self._db_handler = SqliteDbHandler(self._logger,
                                           self._training_db_path,
                                           'training_db')
        self._cluster_controller = None
        self._path_to_artifacts = os.path.join('.', 'artifacts')
        fileutils = FileUtils(self._logger, self._path_to_artifacts)
        self.model_repo_path = fileutils.create('models',
                                                delete_before_creation=True)
Example #26
def TestPatchLevelSubDatasetGeneration(args):
    # set up logger
    logger = Logger(args.dst_data_root_dir)

    # calculate sub_dataset_number based on src_radiograph_level_sub_datasets_root_dir
    sub_dataset_number = len([
        lists for lists in os.listdir(
            args.src_radiograph_level_sub_datasets_root_dir) if os.path.isdir(
                os.path.join(args.src_radiograph_level_sub_datasets_root_dir,
                             lists))
    ])

    for patch_type in ['positive_patches', 'negative_patches']:

        for dataset_type in ['training', 'validation', 'test']:

            for sub_dataset_idx in range(sub_dataset_number):
                sub_dataset_name = 'sub-dataset-{}'.format(sub_dataset_idx + 1)

                src_patch_level_dataset_type_dir = os.path.join(
                    args.src_patch_level_data_root_dir, patch_type,
                    dataset_type)
                src_radiograph_level_dataset_type_dir = os.path.join(
                    args.src_radiograph_level_sub_datasets_root_dir,
                    sub_dataset_name, dataset_type)
                dst_patch_level_dataset_type_dir = os.path.join(
                    args.dst_data_root_dir, sub_dataset_name, patch_type,
                    dataset_type)

                sub_patch_level_filename_list = get_sub_dataset_filename_list(
                    src_patch_level_dataset_type_dir,
                    src_radiograph_level_dataset_type_dir,
                    patch_type,
                    dataset_type,
                    sub_dataset_idx,
                    logger=None)

                copy_data_from_src_2_dst(src_patch_level_dataset_type_dir,
                                         dst_patch_level_dataset_type_dir,
                                         sub_patch_level_filename_list,
                                         sub_dataset_idx,
                                         dataset_type,
                                         patch_type,
                                         logger=logger)
    return
Example #27
def main():
    d = date(2021, 1, 22)
    PARAMS = d.strftime('%Y%m%d')
    URL = settings.LOGS_URL

    # Creating object for logs handling
    log_handler = Logger()

    # Getting data from URL (default: settings.LOGS_URL) with PARAMS.
    # Saving data in log_handler.logs
    data = log_handler.get_data(PARAMS, URL)

    # Sorting data by key (default: 'created_at').
    # This method changes log_handler.logs
    log_handler.sort_data('created_at')

    # Saving data to database:
    log_handler.save_to_db()
Example #28
    def create_dir(path, logger=None):
        '''
        Target:
            - create the specified directory if it does not already exist.
        Parameters:
            - path: directory to create.
            - logger: a logger to show and log some messages.
        '''
        if not logger:
            logger = Logger()

        try:
            if not os.path.exists(path):  # If path does not exist...
                os.makedirs(path)  # Create it
        except Exception as e:
            logger.debug('Error in function "create_dir": {}.'.format(
                str(e)))
            logger.stop_exe(Messenger.USER_NOT_ALLOWED_TO_CHDIR)
Example #29
    def __init__(self,
                 connecter,
                 target_all=False,
                 target_user='',
                 target_dbs=[],
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        if target_all is None:
            self.target_all = target_all
        elif isinstance(target_all, bool):
            self.target_all = target_all
        elif Checker.str_is_bool(target_all):
            self.target_all = Casting.str_to_bool(target_all)
        else:
            self.logger.stop_exe(Messenger.INVALID_TARGET_ALL)

        self.target_user = target_user

        if target_dbs is None:
            self.target_dbs = []
        elif isinstance(target_dbs, list):
            self.target_dbs = target_dbs
        else:
            self.target_dbs = Casting.str_to_list(target_dbs)

        message = Messenger.TERMINATOR_VARS.format(
            server=self.connecter.server,
            user=self.connecter.user,
            port=self.connecter.port,
            target_all=self.target_all,
            target_user=target_user,
            target_dbs=self.target_dbs)
        self.logger.debug(Messenger.TERMINATOR_VARS_INTRO)
        self.logger.debug(message)
Example #30
def TestImageLevelDatasetCropAndSplit(args):
    image_data_root_dir = os.path.join(args.src_data_root_dir, 'images')
    label_data_root_dir = os.path.join(args.src_data_root_dir, 'labels')

    assert os.path.exists(
        image_data_root_dir), 'Source image data root dir does not exist.'
    assert os.path.exists(
        label_data_root_dir), 'Source label data root dir does not exist.'

    # set up logger
    logger = Logger(args.dst_data_root_dir)

    image_list_training, \
    image_list_val, \
    image_list_test = image_filename_list_split(image_data_root_dir,
                                                training_ratio=args.training_ratio,
                                                validation_ratio=args.validation_ratio,
                                                test_ratio=args.test_ratio,
                                                random_seed=args.random_seed,
                                                logger=logger)

    crop_and_save_data(filename_list=image_list_training,
                       image_dir=image_data_root_dir,
                       label_dir=label_data_root_dir,
                       save_path=args.dst_data_root_dir,
                       dataset_type='training',
                       intensity_threshold=args.intensity_threshold,
                       logger=logger)
    crop_and_save_data(filename_list=image_list_val,
                       image_dir=image_data_root_dir,
                       label_dir=label_data_root_dir,
                       save_path=args.dst_data_root_dir,
                       dataset_type='validation',
                       intensity_threshold=args.intensity_threshold,
                       logger=logger)
    crop_and_save_data(filename_list=image_list_test,
                       image_dir=image_data_root_dir,
                       label_dir=label_data_root_dir,
                       save_path=args.dst_data_root_dir,
                       dataset_type='test',
                       intensity_threshold=args.intensity_threshold,
                       logger=logger)

    return