def get_xfrs_token(page_html):
    """
    Method to parse a D2L page to find the XSRF.Token. The token is returned as a string
    :param page_html:
    :return:
    """
    soup = BeautifulSoup(page_html, "html.parser")
    # TODO Loop over all of them, as the location might change
    xsrf = str(soup.findAll("script")[0]).splitlines()
    token = None

    for line in xsrf:
        if "XSRF.Token" in line:  #
            line_soup = re.findall("'(.*?)'", line)
            # We can also find our User.ID in this line as well
            for i in range(0, len(line_soup)):
                if line_soup[i] == 'XSRF.Token':
                    token = line_soup[i + 1]
                    break

    if token is None:
        logger.critical("Cannot find XSRF.Token. Code might have changed")
        exit(1)
    logger.debug("Found XSRF.Token. It's {}".format(token))

    return token
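
A minimal usage sketch (the URL and the requests session are assumptions; the original project presumably supplies its own authenticated session):

import requests

session = requests.Session()  # assumed to already carry valid D2L login cookies
resp = session.get("https://example.desire2learn.com/d2l/home")  # hypothetical D2L page
token = get_xfrs_token(resp.text)
# the token is then attached to subsequent state-changing requests as the XSRF value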
Example #2
def set_vs_code_theme(mode):
    abs_path = expanduser(VS_CODE_SETTINGS_FILE)
    try: # read vs code json file
        with open(abs_path) as _vs_s_f:
            _vs_code_json = json.load(_vs_s_f)
    except FileNotFoundError:
        logger.warning(f'{abs_path} not found, VS Code theme will not be changed')
        return
    if 'workbench.colorTheme' not in _vs_code_json:
        logger.warning('key: "workbench.colorTheme" not found in JSON')
        return
    try: # attempt to backup file
        copyfile(abs_path, f'{abs_path}.bkp')
        logger.info(f'VS Code settings backed up to: "{abs_path}.bkp"')
    except Exception as _e:
        logger.critical('Could not create a backup copy of VS Code settings, aborting.')
        logger.debug(_e)
        raise
    # set the theme
    _vs_code_json['workbench.colorTheme'] = VS_CODE_THEME[mode]
    # write the file back
    with open(abs_path, 'w') as _vs_s_f:
        _vs_s_f.write(json.dumps(_vs_code_json, indent=2))
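
A hedged sketch of the surrounding constants and a call; VS_CODE_SETTINGS_FILE and VS_CODE_THEME are not shown above, so the values below are assumptions:

VS_CODE_SETTINGS_FILE = '~/.config/Code/User/settings.json'  # assumed path, varies by OS
VS_CODE_THEME = {'light': 'Default Light+', 'dark': 'Default Dark+'}  # assumed mode-to-theme mapping

set_vs_code_theme('dark')  # backs settings.json up to settings.json.bkp, then rewrites workbench.colorTheme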
Example #3
def files_sorting(folder_all_notes_name, hashtag):
    notes_list = listdir(Path('Google Keep'))
    os.mkdir(Path(folder_all_notes_name, hashtag))

    notes_value_list = []
    notes_count = 0
    for file_name in notes_list:
        notes_count += 1
        logger.debug(f'Notes processed: {notes_count}/{len(notes_list)}')
        if '.json' in file_name:
            with open(Path(folder_all_notes_name, file_name),
                      'r',
                      encoding='UTF-8') as note_file:
                note_dict = json.load(note_file)

                try:
                    hashtag_value = note_dict['labels'][0]['name']

                except (KeyError, IndexError, TypeError):
                    continue

                if hashtag_value == hashtag:
                    shutil.copy(Path(folder_all_notes_name, file_name),
                                Path(folder_all_notes_name, hashtag))

                    notes_value_list = check_is_pinned(note_dict,
                                                       notes_value_list)

                if 'attachments' in note_dict:
                    try:
                        pictures_collect(note_dict['attachments'])
                    except FileNotFoundError:
                        logger.critical(
                            'The referenced attachments are missing from the folder')

    return notes_value_list
Example #4
def manage52WeekLowestPrice(param):
    """
        获取并保存个股52周最低价
    Parameters
    ------
        Dict
        code: str      股票代码 600030
    Return
    -------
        True 成功
        False 失败
    """
    try:
        code = param['code']
        delta = datetime.timedelta(weeks=52)
        past = int((datetime.date.today() - delta).strftime('%Y%m%d'))
        lowestPrice = dal.queryMinLowPrice('hisprice', code, past)
        if lowestPrice is None:
            logger.info(f'52 week low {code} --not found')
            return False

        redisData = redisDal.redisHGet('xueQiuStockSet', code)
        if redisData is None:
            redisObj = {'code': code, 'low': lowestPrice['min_value']}
        else:
            redisObj = json.loads(redisData)
            redisObj['low'] = lowestPrice['min_value']
        redisObj['lowGenDate'] = datetime.datetime.now().strftime(
            "%Y-%m-%d %H:%M:%S")
        if lowestPrice['min_value'] > 0:
            redisDal.redisHSet('xueQiuStockSet', code, json.dumps(redisObj))
            logger.info(f'52 week low {code} --{json.dumps(redisObj)}')
    except Exception as err:
        logger.critical(err)
        return False
    return True
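
Assuming `dal` and `redisDal` are configured elsewhere in the project, a call might look like this (illustrative sketch only):

ok = manage52WeekLowestPrice({'code': '600030'})
if not ok:
    logger.warning('Failed to refresh the 52-week low for 600030')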
Example #5
 def this(func, *args, **kwargs):
     logger.log("DEBUG", f"Entering: {func.__name__} [{func.__module__}]")
     start_time = time.time()
     result = func(*args, **kwargs)
     end_time = time.time()
     execution_time = round(end_time - start_time, 4)
     if execution_time > 3:
         if execution_time < 10:
             logger.warning(
                 f"Long execution time: {execution_time} [{func.__module__}.{func.__name__}]"
             )
         else:
             logger.critical(
                 f"Extra long execution time: {execution_time} [{func.__module__}.{func.__name__}]"
             )
     else:
         logger.log(
             "DEBUG",
             f"Execution time: {execution_time} [{func.__module__}.{func.__name__}]",
         )
     logger.log("DEBUG",
                f"Result: {result} [{func.__module__}.{func.__name__}]")
     logger.log("DEBUG", f"Exiting: {func.__name__} [{func.__module__}]")
     return result
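
The helper above receives the target function plus its arguments, so it is presumably invoked from a decorator. A sketch of such a wrapper (the `timed` decorator below is an assumption, not part of the original snippet):

import functools
import time

def timed(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return this(func, *args, **kwargs)  # delegate timing and logging to the helper above
    return wrapper

@timed
def slow_job():
    time.sleep(0.1)
    return "done"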
Example #6
def latest_executeble(versions_dir, base_build=None):
    latest = None

    if base_build is not None:
        try:
            latest = (
                int(base_build[4:]),
                max(p for p in versions_dir.iterdir()
                    if p.is_dir() and p.name.startswith(str(base_build))),
            )
        except ValueError:
            pass

    if base_build is None or latest is None:
        latest = max((int(p.name[4:]), p) for p in versions_dir.iterdir()
                     if p.is_dir() and p.name.startswith("Base"))

    version, path = latest

    if version < 55958:
        logger.critical(
            f"Your SC2 binary is too old. Upgrade to 3.16.1 or newer.")
        exit(1)
    return path / BINPATH[PF]
Example #7
def by_id(db: Session, event_id: int) -> Event:
    """Get a single event by id"""
    if not isinstance(db, Session):
        error_message = (f'Could not connect to database. '
                         f'db instance type received: {type(db)}')
        logger.critical(error_message)
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                            detail=error_message)

    try:
        event = db.query(Event).filter_by(id=event_id).one()
    except NoResultFound:
        error_message = f"Event ID does not exist. ID: {event_id}"
        logger.exception(error_message)
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=error_message)
    except MultipleResultsFound:
        error_message = (
            f'Multiple results found when getting event. Expected only one. '
            f'ID: {event_id}')
        logger.critical(error_message)
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                            detail=error_message)
    return event
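
A hedged sketch of a FastAPI route calling by_id; the session wiring below is hypothetical and stands in for whatever the project actually provides:

from fastapi import APIRouter, Depends
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker

# hypothetical wiring; the real project presumably has its own engine/session setup
engine = create_engine("sqlite:///./app.db")
SessionLocal = sessionmaker(bind=engine)

def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

router = APIRouter()

@router.get("/events/{event_id}")
def read_event(event_id: int, db: Session = Depends(get_db)):
    return by_id(db, event_id)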
Example #8
 def crash_report(self) -> str:
     """
     This should be called when handling an unexpected exception. Will create a new log file containing the last 50
     debug messages as well as the crash traceback.
     """
     if not self.unit_test:
         log_dir = os.path.dirname(self.log_filename)
         filename = os.path.join(
             log_dir,
             datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log'))
         with codecs.open(filename, 'w', encoding='utf-8') as outfile:
             outfile.writelines(flexget.log.debug_buffer)
             traceback.print_exc(file=outfile)
         logger.critical(
             'An unexpected crash has occurred. Writing crash report to {}. '
             'Please verify you are running the latest version of flexget by using "flexget -V" '
             'from CLI or by using version_checker plugin'
             ' at http://flexget.com/wiki/Plugins/version_checker. '
             'You are currently using version {}',
             filename,
             get_current_flexget_version(),
         )
     logger.opt(exception=True).debug('Traceback:')
     return traceback.format_exc()
Example #9
    def load(self, model: LSTMLayer) -> LSTMLayer:
        """Load the model. Note that the model must be created before. This function loads only the parameters inside the model.
        
        Args:
            model (LSTMLayer): object to use for loading the model.

        Raises:
            Exception: the file is not found

        Returns:
            LSTMLayer: the loaded model
        """
        if os.path.isfile(self.path):
            self.lock.acquire()
            with open(self.path, "rb") as output_file:
                dict_cardinalities_model = pickle.load(output_file)
            model.load_state_dict(
                dict_cardinalities_model["LSTM"][self.cardinality])
            self.lock.release()
            return model
        else:
            logger.critical("Trying to load an unknown file: " +
                            str(self.path))
            raise Exception("Trying to load an unknown file")
Example #10
 def __thread_main(self):
     """
     数据传输线程函数
     :return:
     """
     self.__t_flag = True
     log.info('%s started.' % current_thread().name)
     try:
         while self.__t_flag:
             events = self.__epoll.poll(1)
             if not events:
                 continue
             for fd, event in events:
                 if event & ~select.EPOLLIN:
                     if self.__remove_connection_pair(fd):
                         log.debug(
                             'Connection(fd=%d) closed, and connection pair removed.'
                             % fd)
                     else:
                         log.warning(
                             'Connection(fd=%d) closed, but connection pair not removed.'
                             % fd)
                 else:
                     sock_src = self.__fd_to_socket[fd]
                     sock_dst = self.__fd_to_socket[self.__fd_to_fd[fd]]
                     data = sock_src.recv(Protocol.SOCKET_BUFFER_SIZE)
                     if data:
                         sock_dst.sendall(data)
     except Exception as e:
         log.critical(e)
     finally:
         for key in self.__port_fds[self.__server_port][1:]:
             self.__remove_connection_pair(key)
     log.warning('%s exited.' % current_thread().name)
     self.__t_flag = False
     self.set_status(Module.Status.STOPPED)
Example #11
 def getKeyWordAtSearchbar(self):
     output = self.volumeInfo
     result = []
     try:
         bias = datetime.timedelta(hours=-self.bias)
     except TypeError:
         pass
     if "FAT" or "NTFS" in output.split(" ")[0]:
         os.chdir("%s/%s/" % (self.mountDir, output.split(" ")[2]))
         logger.info("Loading every user info!")  # TODO:It should be per user!
         try:
             os.chdir("Users/")
         except FileNotFoundError:
             logger.error("Couldn't find Users folder!")
             return None
         for userDir in os.listdir("."):
             if os.access("{0}/NTUSER.DAT".format(userDir), os.F_OK | os.R_OK):
                 registry = Registry.Registry("{0}/NTUSER.DAT".format(userDir))
             elif os.access("{0}/ntuser.dat".format(userDir), os.F_OK | os.R_OK):
                 registry = Registry.Registry("{0}/ntuser.dat".format(userDir))
             else:
                 logger.warning("Couldn't find user registry on %s" % userDir)
                 continue
             try:
                 open1 = registry.open("Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\WordWheelQuery")
             except Registry.RegistryKeyNotFoundException:
                 logger.error("Couldn't find UserAssist registry on user {0}".format(userDir))
                 continue
             logger.info("Now showing Windows Explorer searchbar info!")
             logger.info("Timestamp: " + (open1.timestamp() + bias).strftime('%Y %m %d - %H:%M:%S'))
             for item in open1.values():
                 if item.name() == 'MRUListEx':
                     continue
                 logger.info("KeyWords: %s" % (item.value().decode('utf-16-le')))
     logger.critical(result)
     return result
Example #12
    async def message_handler(self, websocket):
        # Wait for messages from the remote client
        async for rawMessage in websocket:
            message = comms.DecodeMessage(rawMessage)

            if not "type" in message:
                logger.critical(
                    "Ignoring unknown message with no 'type' specifier. Message was {0}"
                    .format(message))
            elif message["type"] == "frame":
                # Do the synchronization analysis on the frame in this message
                pixelArrayObject = comms.ParseFrameMessage(message)
                if not "timestamp" in pixelArrayObject.metadata:
                    logger.critical(
                        "Received a frame that does not have compulsory metadata. We will ignore this frame."
                    )
                    continue
                logger.debug("Received frame with timestamp {0:.3f}".format(
                    pixelArrayObject.metadata["timestamp"]))
                if "sync" in pixelArrayObject.metadata:
                    logger.critical(
                        "Received a frame that already has 'sync' metadata. We will overwrite this!"
                    )
                pixelArrayObject.metadata["sync"] = dict()

                # JT TODO: for now I just hack self.width and self.height, but this should get fixed as part of the PixelArray refactor
                self.height, self.width = pixelArrayObject.shape
                self.analyze_pixelarray(pixelArrayObject)

                # Send back to the client the metadata we have added to the frame as part of the sync analysis.
                # This will include whether or not a trigger is predicted, and when.
                keys = [
                    "optical_gating_state", "unwrapped_phase",
                    "predicted_trigger_time_s", "trigger_type_sent"
                ]
                response_dict = dict()
                for k in keys:
                    if k in pixelArrayObject.metadata:
                        response_dict[k] = pixelArrayObject.metadata[k]

                returnMessage = comms.EncodeFrameResponseMessage(response_dict)
                await websocket.send(returnMessage)
            else:
                logger.critical("Ignoring unknown message of type {0}".format(
                    message["type"]))
Example #13
def get_number_response_analyze(response_get_number):
    if 'ACCESS_NUMBER' in response_get_number.text:
        logger.debug('Request succeeded, all good')
        return True
    elif response_get_number.text == 'NO_BALANCE':
        logger.critical('No balance left on the account')
        exit_code()
    elif response_get_number.text == 'NO_NUMBERS':
        logger.debug('No numbers available')
    elif response_get_number.text == 'BAD_KEY':
        logger.critical('The token from inf.yml does not work')
        exit_code()
    elif 'BAD_STATUS' in response_get_number.text:
        logger.critical('Something is wrong with the operation ID')
        exit_code()
    else:
        logger.critical(f'Something went wrong. sms-activate response: {response_get_number.text}')
        exit_code()
    return False
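
A hedged usage sketch; the sms-activate endpoint and request parameters below are assumptions:

import requests

response_get_number = requests.get(
    'https://api.sms-activate.org/stubs/handler_api.php',  # assumed endpoint
    params={'api_key': '<token>', 'action': 'getNumber', 'service': 'tg'},
)
if get_number_response_analyze(response_get_number):
    logger.debug('Number acquired, continuing')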
Example #14
 def compile(self, *args, **kwargs):
     super(BertMultiTask, self).compile(*args, **kwargs)
     logger.critical('Initial lr: {}'.format(self.params.lr))
     logger.critical('Train steps: {}'.format(self.params.train_steps))
     logger.critical('Warmup steps: {}'.format(self.params.num_warmup_steps))
     self.optimizer, self.lr_scheduler = transformers.optimization_tf.create_optimizer(
         init_lr=self.params.lr,
         num_train_steps=self.params.train_steps,
         num_warmup_steps=self.params.num_warmup_steps,
         weight_decay_rate=0.01
     )
     self.mean_acc = tf.keras.metrics.Mean(name='mean_acc')
Example #15
 def get_extractWebCache(self):
     if self.IEVersion is None:
         return None
     if int(self.IEVersion.split(".")[0]) >= 10:
         output = self.volumeInfo
         if "FAT" or "NTFS" in output.split(" ")[0]:
             os.chdir("%s/%s" % (self.mountDir, output.split(" ")[2]))
             logger.info(
                 "Loading every user info!")  # TODO:It should be per user!
             try:
                 os.chdir("Users/")
             except FileNotFoundError:
                 logger.error("Couldn't find Users folder!")
                 return None
             for userDir in os.listdir("."):
                 try:
                     os.chdir("%s/%s" %
                              (self.mountDir, output.split(" ")[2]) +
                              "/Users/" + userDir +
                              "/AppData/Local/Microsoft/Windows/WebCache")
                     self._userList.append(userDir)
                 except (FileNotFoundError, NotADirectoryError):
                     logger.critical(
                         "Couldn't find WebCache on folder %s" %
                         ("%s/%s" % (self.mountDir, output.split(" ")[2]) +
                          "/Users/" + userDir +
                          "/AppData/Local/Microsoft/Windows/WebCache"))
                     continue
                 if os.access("WebCacheV01.dat", os.R_OK | os.F_OK):
                     logger.success(
                         "WebCacheV01.dat open on folder %s OK!" % userDir)
                 else:
                     logger.critical(
                         "WebCacheV01.dat open on folder %s failed!" %
                         userDir)
                     return None
                 logger.info("Now showing ESE DB info.")
                 logger.debug(
                     subprocess.getoutput("esedbinfo WebCacheV01.dat"))
                 logger.info("Now extracting ESE DB info.")
                 webcachePath = os.path.realpath("WebCacheV01.dat")
                 os.chdir(self.tempDir)
                 logger.debug(
                     subprocess.getoutput(
                         "esedbexport -v %s -t WebCacheV01.dat-%s" %
                         (webcachePath, userDir)))
                 if os.access("WebCacheV01.dat-%s.export" % userDir,
                              os.F_OK | os.R_OK):
                     logger.success("WebCache extract OK!")
                 else:
                     logger.critical("Error extracting!")
                     return None
             return "OK"
Example #16
 def read_configuration(self, config_path):
     logger.debug(f'Reading configuration file "{config_path}"')
     try:
         self.config.read(config_path)
     except Exception as e:
         logger.critical(f'Error reading configuration file; {e}')
         logger.critical('Closing...')
         exit(1)
     try:
         sections = self.config.sections()
         for section in CONFIGURATION_LAYOUT:
             assert section in sections
             for key in CONFIGURATION_LAYOUT[section]:
                 assert key in self.config[section]
     except AssertionError:
         logger.critical(
             f'Configuration file malformed, creating sample as "{DEFAULT_COMPLETE_CONFIG_PATH}"...'
         )
         for section in CONFIGURATION_LAYOUT:
             self.config[section] = {}
             for key in CONFIGURATION_LAYOUT[section]:
                 self.config[section][key] = f'<{key}>'
         try:
             if os.path.isfile(DEFAULT_COMPLETE_CONFIG_PATH):
                 logger.error(
                     "Can't create configuration sample, please provide a custom configuration file"
                 )
                 exit(1)
             with open(DEFAULT_COMPLETE_CONFIG_PATH, 'w') as file:
                 self.config.write(file)
         except Exception as e:
             logger.critical(
                 f"Can't create a config sample as '{DEFAULT_COMPLETE_CONFIG_PATH}' in working directory; {e}"
             )
         finally:
             exit(1)
     logger.info(f'Configuration loaded: \n'
                 f'\tToken: {self.config["github"]["token"]}\n'
                 f'\tLogs path: {self.config["logging"]["logs_path"]}')
Example #17
def input_validation(input_args):
    """
    Validates input arguments
    :param input_args: list of arguments of type: ['format_icpms_linescans.py', 'path_to_data_folder']
    :return: False if not valid, else True is returned
    """
    # Determine if number of arguments are correct
    if len(input_args) != 2:
        logger.critical("Invalid number of arguments. Usage:")
        logger.critical("format_icpms_linescans <path_to_data_folder>")
        return False

    # Determine if directory specified exists
    if not os.path.isdir(input_args[1]):
        logger.critical(f'Directory "{input_args[1]}" does not exist')
        return False

    return True
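
Typical wiring from the script entry point, as a sketch (the original entry point is not shown):

import sys

if __name__ == '__main__':
    if not input_validation(sys.argv):
        sys.exit(1)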
Example #18
    def _updatestate(self):
        self._readSensors()
        self.signalState.emit(DeviceHandler.hardwareState)

        #check that kettle volume is high enough to cover heating elements, otherwise disable them
        for kettleindex, kettlename in DeviceHandler.KETTLE_NAMES_GIVEN_ID.items(
        ):
            if kettlename in ("HLT", "BK"):
                if self.hardwareState.volumes[
                        kettleindex] < DeviceHandler.HEATING_ELEMENT_MIN_VOLUME:
                    if not self.hardwareState.kettleheatingelementsdisabled[
                            kettleindex]:
                        logger.critical(
                            f'Kettle {kettlename} heating elements disabled due to insufficient liquid volume'
                        )
                    self.hardwareState.kettleheatingelementsdisabled[
                        kettleindex] = True
                else:
                    self.hardwareState.kettleheatingelementsdisabled[
                        kettleindex] = False

        #check that HLT isn't about to flood, otherwise disable valve that adds water
        if self.hardwareState.volumes[DeviceHandler.KETTLE_IDS_GIVEN_NAME[
                "HLT"]] > DeviceHandler.KETTLE_MAX_VOLUME:
            if self.hardwareState.ballValves[0] == True:
                self.closeBallValve(0)
            if not self.hardwareState.HLTfilldisabled:
                logger.critical(
                    "Fresh water flow to HLT disabled due to excess volume")
            self.hardwareState.HLTfilldisabled = True
        else:
            self.hardwareState.HLTfilldisabled = False

        #check that pumps still have valid valve paths open
        for index, pump in enumerate(self.hardwareState.pumps):
            if pump == 1 and not self._pumpHasOpenPath(index):
                self._setPumpState(index, False)
                logger.critical(
                    f'Pump {index} caught running without valid ball valve path, pump disabled'
                )
Example #19
    def stake(self, amount_tao: int, uid: int):
        r""" Stakes token of amount to hotkey uid.
        """
        self.wallet.assert_coldkey()
        self.wallet.assert_coldkeypub()
        self.subtensor.connect()
        staking_balance = Balance.from_float(amount_tao)
        account_balance = self.subtensor.get_balance(
            self.wallet.coldkey.ss58_address)
        if account_balance < staking_balance:
            logger.critical(
                "Not enough balance (\u03C4{}) to stake \u03C4{}".format(
                    account_balance, staking_balance))
            quit()

        neurons = self._associated_neurons()
        neuron = neurons.get_by_uid(uid)
        if not neuron:
            logger.critical(
                "Neuron with uid: {} is not associated with coldkey.pub: {}".
                format(uid, self.wallet.coldkey.public_key))
            quit()

        logger.info(
            "Adding stake of \u03C4{} from coldkey {} to hotkey {}".format(
                staking_balance.tao, self.wallet.coldkey.public_key,
                neuron.hotkey))
        logger.info("Waiting for finalization...")
        result = self.subtensor.add_stake(wallet=self.wallet,
                                          amount=staking_balance,
                                          hotkey_id=neuron.hotkey,
                                          wait_for_finalization=True,
                                          timeout=bittensor.__blocktime__ * 5)
        if result:
            logger.success(
                "Staked: \u03C4{} to uid: {} from coldkey.pub: {}".format(
                    staking_balance.tao, uid, self.wallet.coldkey.public_key))
        else:
            logger.critical("Stake transaction failed")
Example #20
    def unstake(self, amount_tao: int, uid: int):
        r""" Unstaked token of amount to from uid.
        """
        self.wallet.assert_coldkey()
        self.wallet.assert_coldkeypub()
        self.connect_to_chain()
        unstaking_balance = Balance.from_float(amount_tao)
        neurons = self._associated_neurons()
        neuron = neurons.get_by_uid(uid)
        if not neuron:
            logger.critical(
                "Neuron with uid: {} is not associated with coldkey.pub: {}".
                format(uid, self.wallet.coldkey.public_key))
            quit()

        neuron.stake = self.subtensor.get_stake_for_uid(neuron.uid)
        if unstaking_balance > neuron.stake:
            logger.critical(
                "Neuron with uid: {} does not have enough stake ({}) to be able to unstake {}"
                .format(uid, neuron.stake, unstaking_balance))
            quit()

        logger.info(
            "Requesting unstake of \u03C4{} from hotkey: {} to coldkey: {}".
            format(unstaking_balance.tao, neuron.hotkey,
                   self.wallet.coldkey.public_key))
        logger.info("Waiting for finalization...")
        result = self.subtensor.unstake(wallet=self.wallet,
                                        amount=unstaking_balance,
                                        hotkey_id=neuron.hotkey,
                                        wait_for_finalization=True,
                                        timeout=bittensor.__blocktime__ * 5)
        if result:
            logger.success(
                "Unstaked: \u03C4{} from uid:{} to coldkey.pub:{}".format(
                    unstaking_balance.tao, neuron.uid,
                    self.wallet.coldkey.public_key))
        else:
            logger.critical("Unstaking transaction failed")
Example #21
 def _getThumbCacheFilesByUser(self, userName):
     output = self.volumeInfo
     if "FAT" or "NTFS" in output.split(" ")[0]:
         os.chdir("%s/%s/" % (self.mountDir, output.split(" ")[2]))
         logger.info("Loading every user info!")  # TODO:It should be per user!
         try:
             os.chdir("Users/")
         except FileNotFoundError:
             logger.error("Couldn't find Users folder!")
             return None
         for userDir in os.listdir("."):
             if userName != userDir:
                 continue
             if os.access("{0}/AppData/Local/Microsoft/Windows/Explorer/".format(userDir), os.F_OK | os.R_OK):
                 pass
             else:
                 logger.critical("Couldn't find Explorer path!")
                 continue
             fileList = glob.glob("{0}/AppData/Local/Microsoft/Windows/Explorer/thumbcache*.db".format(userDir))
             if not fileList:
                 logger.critical("Thumbcache not found on user %s!" % userDir)
                 continue
             logger.critical(fileList)
             return fileList
Example #22
 def check_password(self, password):
     """ First checks if user has old SHA256 password. If a SHA256 pw returns as anything besides
     null, then we check if the password was valid. If it is valid we convert the plaintext into
     bcrypt PW, remove the old password, and commit the user to DB. Then we allow the Bcrypt
     pw hash comparison.
     """
     check = False
     if self.password is not None:
         logger.critical('Old style password exists.')
         if check_password_hash(self.password, password):
             self.passwd = password
             self.password = None
             db.session.add(self)
             db.session.commit()
             logger.critical('Old style password replaced.')
         else:
             return check
     try:
         check = bcrypt.check_password_hash(self._password.encode('utf8'),
                                            password.encode('utf8'))
     except Exception:
         logger.critical('Error in password check.')
     finally:
         return check
Example #23
import asyncio

import sqlanydb
from aiogram import Bot, Dispatcher
from aiogram.types.message import ParseMode
from aiogram.utils import executor
from aiogram.utils.exceptions import ValidationError
from loguru import logger

from core import config
from core.packages import PackagesLoader

try:
    bot = Bot(token=config.BOT_TOKEN,
              validate_token=True,
              parse_mode=ParseMode.MARKDOWN_V2)
except ValidationError:
    logger.critical(
        "Bot token is invalid. Make sure that you've set a valid token in the .env file"
    )
    quit()

loop = asyncio.get_event_loop()
dp = Dispatcher(bot, loop=loop)
runner = executor.Executor(dp, skip_updates=config.BOT_SKIPUPDATES)

loader = PackagesLoader()

try:
    logger.debug('Connecting to a SQLA database with UID "{}"', config.DB_UID)
    conn = sqlanydb.connect(uid=config.DB_UID, pwd=config.DB_PASSWORD)
    curs = conn.cursor()
    logger.success(
        'Successfully connected to SQLAnywhere database as "{}". Reading table "{}"',
Example #24
async def main(experiment: "Experiment", dry_run: Union[bool, int],
               strict: bool):
    """
    The function that actually does the execution of the protocol.

    Arguments:
    - `experiment`: The experiment to execute.
    - `dry_run`: Whether to simulate the experiment or actually perform it. If an integer greater than zero, the dry run will execute at that many times speed.
    - `strict`: Whether to stop execution upon any errors.
    """

    # logger.warning("Support for pausing execution is EXPERIMENTAL!")
    logger.info(f"Using MechWolf v{__version__} ⚙️🐺")
    logger.info("Performing final launch status check...")

    tasks = []

    # Run protocol
    # Enter context managers for each component (initialize serial ports, etc.)
    # We can do this with contextlib.ExitStack on an arbitrary number of components
    try:
        with ExitStack() as stack:
            if not dry_run:
                components = []
                for component in experiment._compiled_protocol.keys():
                    components.append(stack.enter_context(component))
            else:
                components = list(experiment._compiled_protocol.keys())
            for component in components:
                # Find out when each component's monitoring should end
                procedures: Iterable = experiment._compiled_protocol[component]
                end_times: List[float] = [p["time"] for p in procedures]
                end_time: float = max(
                    end_times)  # we only want the last end time
                logger.trace(
                    f"Calculated end time for {component} as {end_time}s")

                for procedure in experiment._compiled_protocol[component]:
                    tasks.append(
                        wait_and_execute_procedure(
                            procedure=procedure,
                            component=component,
                            experiment=experiment,
                            dry_run=dry_run,
                            strict=strict,
                        ))
                logger.trace(f"Task list generated for {component}.")

                # for sensors, add the monitor task
                if isinstance(component, Sensor):
                    logger.trace(
                        f"Creating sensor monitoring task for {component}")
                    tasks.append(
                        _monitor(component, experiment, bool(dry_run), strict))
                logger.debug(f"{component} is GO!")
            logger.debug(f"All components are GO!")

            # Add a task to monitor the stop button
            tasks.append(check_if_cancelled(experiment))
            tasks.append(pause_handler(experiment, end_time, components))
            tasks.append(end_loop(experiment))
            logger.debug("All tasks are GO!")

            # Add a reminder about FF
            if type(dry_run) == int:
                logger.info(f"Simulating at {dry_run}x speed...")

            # begin the experiment
            logger.info("All checks passed. Experiment is GO!")
            experiment.is_executing = True
            experiment.start_time = time.time()

            # convert to local time for the start message
            _local_time = asctime(localtime(experiment.start_time))
            start_msg = f"{experiment} started at {_local_time}."

            logger.success(start_msg)

            try:
                done, pending = await asyncio.wait(
                    tasks, return_when=asyncio.FIRST_EXCEPTION)

                # when this code block is reached, the tasks will have either all completed or
                # an exception has occurred.
                experiment.end_time = time.time()

                # when this code block is reached, the tasks will have completed or have been cancelled.
                _local_time = asctime(localtime(experiment.end_time))
                end_msg = f"{experiment} completed at {_local_time}."

                # Stop all of the sensors and exit the read loops
                logger.debug("Resetting all components")

                # reset object
                for component in list(experiment._compiled_protocol.keys()):
                    # reset object
                    logger.debug(f"Resetting {component} to base state")
                    component._update_from_params(component._base_state)

                await asyncio.sleep(1)

                # Cancel all of the remaining tasks
                logger.debug("Cancelling all remaining tasks")
                for task in pending:
                    task.cancel()

                # Raise exceptions, if any
                logger.debug("Raising exceptions, if any")
                for task in done:
                    task.result()

                # we only reach this line if things went well
                logger.success(end_msg)

            except RuntimeError as e:
                logger.error(
                    f"Got {repr(e)}. Full traceback is logged at trace level.")
                logger.error("Protocol execution is stopping NOW!")
                logger.critical(end_msg)

            except ProtocolCancelled:
                logger.error(f"Stop button pressed.")
                logger.error("Protocol execution is stopping NOW!")
                logger.critical(end_msg)

            except Exception:
                logger.trace(traceback.format_exc())
                logger.error(
                    "Failed to execute protocol due to uncaught error!")
                logger.error("Protocol execution is stopping NOW!")
                logger.critical(end_msg)
    finally:

        # set some protocol metadata
        experiment.was_executed = True
        # after E.was_executed=True, we THEN log that we're cleaning up so it's shown
        # in the cleanup category, not with a time in EET
        logger.info("Experimentation is over. Cleaning up...")
        experiment.is_executing = False

        if experiment._bound_logger is not None:
            logger.trace("Deactivating logging to Jupyter notebook widget...")
            logger.remove(experiment._bound_logger)
Example #25
 def handle_exception(self, exception):
     logger.critical("got exception")
     logger.critical(exception)
     self.got_to_main_page()
Example #26
from loguru import logger
import sys
import os

logger.remove()
module = os.path.basename(__file__)
logger.add("logs/logs.log", format="{time} {level} {module} {message}")
logger = logger.bind(module=module)
logger.add(sys.stdout,
           format="{time} <level>{level} | {module} | {message}</level>",
           colorize=True)
logger = logger.bind(module=module)

logger.trace('Trace')
logger.debug('Debug')
logger.info('Info')
logger.success('Success')
logger.warning('Warning')
logger.error('Error')
logger.critical('Critical')
Example #27
# Create a default config file if one does not exist yet
if not os.path.isfile('./config.json'):
    with open(path_config, 'a') as config_file:
        config_file.write(
            '{"db_host" : "localhost", "db_user" : "root", "db_password" : " ", "db_name" : "aki_accounts"}'
        )

with open(path_config, 'r') as config_file:
    try:
        _config = json.load(config_file)
    except json.JSONDecodeError:
        logger.critical('Failed to load the config')
        logger.info('Closing the application...')

connection = pymysql.connect(host=_config['db_host'],
                             user=_config['db_user'],
                             password=_config['db_password'],
                             database=_config['db_name'],
                             autocommit=True)

cursor = connection.cursor()


# SIGN WINDOW
class SignWindow(QMainWindow):
    def __init__(self):
        QMainWindow.__init__(self)
Example #28
    def __build_model(self) -> None:
        """ Init transformer model + tokenizer + classification head."""

        if self.hparams.transformer_type == 'roberta-long':
            self.transformer = RobertaLongForMaskedLM.from_pretrained(
                self.hparams.encoder_model,
                output_hidden_states=True,
                gradient_checkpointing=True)

        elif self.hparams.transformer_type == 'longformer':
            self.transformer = AutoModel.from_pretrained(
                self.hparams.encoder_model,
                output_hidden_states=True,
                gradient_checkpointing=True,  #critical for training speed.
            )

        else:  #BERT
            self.transformer = AutoModel.from_pretrained(
                self.hparams.encoder_model,
                output_hidden_states=True,
            )

        logger.warning(f'model is {self.hparams.encoder_model}')

        if self.hparams.transformer_type == 'longformer':
            logger.warning('Turning ON gradient checkpointing...')

            self.transformer = AutoModel.from_pretrained(
                self.hparams.encoder_model,
                output_hidden_states=True,
                gradient_checkpointing=True,  #critical for training speed.
            )

        else:
            self.transformer = AutoModel.from_pretrained(
                self.hparams.encoder_model,
                output_hidden_states=True,
            )

        # set the number of features our encoder model will return...
        self.encoder_features = 768

        # Tokenizer
        if self.hparams.transformer_type == 'longformer' or self.hparams.transformer_type == 'roberta-long':
            self.tokenizer = Tokenizer(
                pretrained_model=self.hparams.encoder_model,
                max_tokens=self.hparams.max_tokens_longformer)
            self.tokenizer.max_len = 4096

        else:
            self.tokenizer = Tokenizer(
                pretrained_model=self.hparams.encoder_model, max_tokens=512)

        #others:
        #'emilyalsentzer/Bio_ClinicalBERT' 'simonlevine/biomed_roberta_base-4096-speedfix'
        # Ben's new architecture
        if self.hparams.nn_arch == 'ben1':
            self.classification_head = nn.Sequential(
                nn.Linear(self.encoder_features, self.encoder_features * 3),
                nn.Dropout(0.1),
                nn.Linear(self.encoder_features * 3, self.encoder_features),
                nn.Linear(self.encoder_features, self.hparams.n_labels),
            )

        elif self.hparams.nn_arch == 'ben2':
            self.classification_head = nn.Sequential(
                nn.Dropout(0.1),
                nn.Linear(self.encoder_features, self.encoder_features * 2),
                nn.Tanh(),
                nn.Linear(self.encoder_features * 2,
                          self.encoder_features * 3),
                nn.ReLU(),
                nn.Linear(self.encoder_features * 3, self.encoder_features),
                nn.Sigmoid(),
                nn.Linear(self.encoder_features, self.hparams.n_labels),
            )

        elif self.hparams.nn_arch == 'CNN':
            logger.critical('CNN not yet implemented')

        elif self.hparams.nn_arch == 'default':
            self.classification_head = nn.Sequential(
                nn.Linear(self.encoder_features, self.encoder_features * 2),
                nn.Tanh(),
                nn.Linear(self.encoder_features * 2, self.encoder_features),
                nn.Tanh(),
                nn.Linear(self.encoder_features, self.hparams.n_labels),
            )

        # Classification head
        elif self.hparams.single_label_encoding == 'default':
            self.classification_head = nn.Sequential(
                nn.Linear(self.encoder_features, self.encoder_features * 2),
                nn.Tanh(),
                nn.Linear(self.encoder_features * 2, self.encoder_features),
                nn.Tanh(),
                nn.Linear(self.encoder_features, self.hparams.n_labels),
            )

        elif self.hparams.single_label_encoding == 'graphical':
            logger.critical('Graphical embedding not yet implemented!')
Example #29
 def generate_error(self):
     logger.critical('''
         CRITICAL: Lost connection to SQL server at this ip: {}
         '''.format(Connection().ip))
Example #30
async def maintain_SCII_count(count: int,
                              controllers: List[Controller],
                              proc_args: List[Dict] = None):
    """Modifies the given list of controllers to reflect the desired amount of SCII processes"""
    # kill unhealthy ones.
    if controllers:
        toRemove = []
        alive = await asyncio.wait_for(asyncio.gather(*(c.ping()
                                                        for c in controllers
                                                        if not c._ws.closed),
                                                      return_exceptions=True),
                                       timeout=20)
        i = 0  # for alive
        for controller in controllers:
            if controller._ws.closed:
                if not controller._process._session.closed:
                    await controller._process._session.close()
                toRemove.append(controller)
            else:
                if not isinstance(alive[i], sc_pb.Response):
                    try:
                        await controller._process._close_connection()
                    finally:
                        toRemove.append(controller)
                i += 1
        for c in toRemove:
            c._process._clean()
            if c._process in kill_switch._to_kill:
                kill_switch._to_kill.remove(c._process)
            controllers.remove(c)

    # spawn more
    if len(controllers) < count:
        needed = count - len(controllers)
        if proc_args:
            index = len(controllers) % len(proc_args)
        else:
            proc_args = [{} for _ in range(needed)]
            index = 0
        extra = [
            SC2Process(**proc_args[(index + _) % len(proc_args)])
            for _ in range(needed)
        ]
        logger.info(f"Creating {needed} more SC2 Processes")
        for k in range(3):  # try thrice
            if platform.system() == "Linux":
                # Works on linux: start one client after the other
                new_controllers = [
                    await asyncio.wait_for(sc.__aenter__(), timeout=50)
                    for sc in extra
                ]
            else:
                # Doesn't seem to work on Linux: starting 2 clients at nearly the same time
                new_controllers = await asyncio.wait_for(
                    asyncio.gather(*[sc.__aenter__() for sc in extra],
                                   return_exceptions=True),
                    timeout=50)

            controllers.extend(c for c in new_controllers
                               if isinstance(c, Controller))
            if len(controllers) == count:
                await asyncio.wait_for(asyncio.gather(*(c.ping()
                                                        for c in controllers)),
                                       timeout=20)
                break
            extra = [
                extra[i] for i, result in enumerate(new_controllers)
                if not isinstance(result, Controller)
            ]
        else:
            logger.critical("Could not launch sufficient SC2")
            raise RuntimeError

    # kill excess
    while len(controllers) > count:
        proc = controllers.pop()
        proc = proc._process
        logger.info(f"Removing SCII listening to {proc._port}")
        await proc._close_connection()
        proc._clean()
        if proc in kill_switch._to_kill:
            kill_switch._to_kill.remove(proc)
Example #31
def consolidate():
    """
    Converts previous archive data model to new one.
    """

    session = Session()
    try:
        logger.verbose('Checking archive size ...')
        count = session.query(flexget.components.archive.db.ArchiveEntry).count()
        logger.verbose('Found {} items to migrate, this can be aborted with CTRL-C safely.', count)

        # consolidate old data
        from progressbar import ETA, Bar, Percentage, ProgressBar

        widgets = ['Process - ', ETA(), ' ', Percentage(), ' ', Bar(left='[', right=']')]
        bar = ProgressBar(widgets=widgets, maxval=count).start()

        # id's for duplicates
        duplicates = []

        for index, orig in enumerate(
            session.query(flexget.components.archive.db.ArchiveEntry).yield_per(5)
        ):
            bar.update(index)

            # item already processed
            if orig.id in duplicates:
                continue

            # item already migrated
            if orig.sources:
                logger.info(
                    'Database looks like it has already been consolidated, item {} has already sources ...',
                    orig.title,
                )
                session.rollback()
                return

            # add legacy task to the sources list
            orig.sources.append(flexget.components.archive.db.get_source(orig.task, session))
            # remove task, deprecated .. well, let's still keep it ..
            # orig.task = None

            for dupe in (
                session.query(flexget.components.archive.db.ArchiveEntry)
                .filter(flexget.components.archive.db.ArchiveEntry.id != orig.id)
                .filter(flexget.components.archive.db.ArchiveEntry.title == orig.title)
                .filter(flexget.components.archive.db.ArchiveEntry.url == orig.url)
                .all()
            ):
                orig.sources.append(flexget.components.archive.db.get_source(dupe.task, session))
                duplicates.append(dupe.id)

        if duplicates:
            logger.info('Consolidated {} items, removing duplicates ...', len(duplicates))
            for id in duplicates:
                session.query(flexget.components.archive.db.ArchiveEntry).filter(
                    flexget.components.archive.db.ArchiveEntry.id == id
                ).delete()
        session.commit()
        logger.info('Completed! This does NOT need to be run again.')
    except KeyboardInterrupt:
        session.rollback()
        logger.critical('Aborted, no changes saved')
    finally:
        session.close()