Example #1
  def run(self):
    while True:
      # Check for dhcping. check_output raises CalledProcessError on a
      # non-zero exit, so treat that the same as empty output.
      try:
        output = subprocess.check_output("which dhcping", shell = True)
      except subprocess.CalledProcessError:
        output = None
      if not output:
        log.fatal("dhcping not found.")

      try:
        output = subprocess.check_output("dhcping %s" % (self.wnce_addr),
            shell = True, stderr = subprocess.STDOUT,
            universal_newlines = True)
      except subprocess.CalledProcessError as e:
        # dhcping exits non-zero when it gets no reply; its output is still parsed below.
        output = e.output

      lines = output.split("\n")
      responses = []
      for line in lines:
        if "Got answer from:" in line:
          response_addr = line.split(": ")[1]
          responses.append(response_addr)

          log.info("Got dhcp response from %s." % (response_addr))

      # If it somehow gets reset, the second address will be the one it has.
      bad_addrs = [self.wnce_addr, "192.168.1.251"]
      for addr in bad_addrs:
        if addr in responses:
          log.error("WNCE IS RUNNING ROGUE DHCP SERVER!")

          # Hibernating will automatically cut power to the wnce. It will,
          # however, also interrupt power to us.
          neato_system.hibernate()

      time.sleep(60)
Example #2
	def run(self, *args, **kwargs) -> None:
		self.first_execution = time.perf_counter()  # monotonic on both Windows and Linux which is :thumbsup:
		self.first_execution_dt = datetime.datetime.utcnow()
		if not kwargs.get("bot", True):
			log.fatal("tried to login with a non-bot token (this framework is designed to run with a bot account)")
			raise UserBotError("Non-bot accounts are not supported")

		# checks to make sure everything is a coroutine
		handler_groups = [
			(self._ready_handlers, "ready functions"),
			(self._shutdown_handlers, "shutdown functions"),
			(self._message_handlers, "message handlers"),
			(self._member_join_handlers, "member join handlers"),
			(self._member_remove_handlers, "member leave handlers"),
			(self._reaction_add_handlers, "reaction add handlers"),
			(self._reaction_remove_handlers, "reaction remove handlers"),
		]
		for handlers, name in handler_groups:
			if not all(asyncio.iscoroutinefunction(x) for x in handlers):
				log.critical(f"not all {name} are coroutines")
				raise HandlerError(f"not all {name} are coroutines")

		log.debug("all functions good to run (are coroutines)")

		log.info(f"Bot started at {str(self.first_execution_dt)} ({self.first_execution})")
		super().run(*args, **kwargs)
Example #3
def insert_into(table_name, data):
    try:
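        # NOTE: table_name and data are interpolated straight into the SQL string; parameterized queries would be safer.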
        cursor.execute(f"INSERT INTO  {table_name} {data}")
        conn.commit()
    except mysql.connector.errors.IntegrityError as e:
        # errno 1062 would indicate a duplicate key; here every integrity error is logged as fatal.
        log.fatal(e.msg)
Example #4
def update_user_cart(user_id, operation, isbn, count=None):
	"""
	:param count:
	:param isbn:
	:param user_id:
	:param operation: 0 delete
	 							1 update
    """
	now = '{0:%Y%m%d%H%M%S}'.format(datetime.datetime.now())
	# user: eu.Customer = user_cache[user_id]
	user: eu.Customer = _get_user(user_id)
	if user.type == "0000":
		return False
	if operation == 0:
		if user.add_shopping_cart(isbn, now):
			log.info(f"user {user_id} add book {isbn} to shopping cart")
			return True
		log.fatal(f"user {user_id} add book {isbn} to shopping cart failed")
		return False
	elif operation == 1:
		if user.update_shopping_cart(isbn, count):
			log.info(f"user {user_id} update book {isbn} to {count}")
			return True
		log.fatal(f"user {user_id} update book {isbn} to {count} failed")
		return False
Example #5
    def __sanityCheckVectorLength(self, meta):
        for testcase in meta.testcases.list:
            step = 1
            for test_entry in testcase.content:
                if type(test_entry) is CLS.TestStep:
                    set_list = test_entry.set_list
                    expect_list = test_entry.expect_list
                    if not self.__sanityCheckVectorLengthInStepList(
                            meta, set_list, step):
                        return False
                    if not self.__sanityCheckVectorLengthInStepList(
                            meta, expect_list, step):
                        return False
                elif type(test_entry) is CLS.TestSequence:
                    vector_list = test_entry.list
                    for vector in vector_list:
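                        # Drop the 5-character prefix; the rest is space-separated signal values.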
                        vector = vector[5:]
                        signals = vector.split(" ")
                        pos = 0
                        for signal_val in signals:
                            meta_signal = meta.signals.getSignalAtPosition(pos)
                            if len(signal_val) != meta_signal.size:
                                log.fatal(
                                    "sanityCheckVectorLength: signal={0:s} with value={1:s} in step={2:d} has incorrect length"
                                    .format(meta_signal.name, signal_val,
                                            step))
                                return False
                            pos += meta_signal.size

                step += 1
        return True
Example #7
	def apply_constraint_region(self, property_name: str, region: list = None):
		"""
		:param property_name: str of property_name(column name)
		:param region: list of two number | None. Default None
		:return: condition for property_name
		"""
		if region is None or (region[0] is None and region[1] is None):
			return self
		try:
			self._where_check()
			if self._where:
				log.debug("where add apply_constraint_region")
			if None not in region:
				high = float(max(region))
				low = float(min(region))
				self.constraints += f" AND {low}<={property_name} AND {property_name}<={high}"
			elif region[0] is None:
				high = float(region[1])
				self.constraints += f" AND {property_name}<={high}"
			elif region[1] is None:
				low = float(region[0])
				self.constraints += f" AND {low}<={property_name}"
		except ValueError as e:
			self._revert_where_check()
			log.fatal(f"Invalid region constraint for {property_name}, get {region}.\nError: {e}")
		finally:
			return self
Example #8
def check_filter_record(record,
                        tLogTypes=LOG_TYPES,
                        dtStartFilter=None,
                        dtStopFilter=None,
                        tFilters=(),
                        sFilterLogic=AND_FILTER_LOGIC):
    """
    Check whether a record matches the filters.
    @param record: The record currently being checked.
    @param tLogTypes: Tuple/list of message types.
    @param dtStartFilter: Start date/time of the time filter.
        If not defined, selection starts from the beginning of the file.
    @param dtStopFilter: End date/time of the time filter.
        If not defined, selection continues to the end of the file.
    @param tFilters: Tuple/list of additional filter methods.
        Filter methods are given as lambdas or functions that take the
        record dictionary and return True if the record is selected,
        False if it is not.
    @param sFilterLogic: How the additional filters are combined:
        AND - the record is selected only if all filters pass,
        OR - it is enough for one filter to pass.
    @return: True - the record matches all filters and should be added to
        the selection / False - the record fails some filter.
    """
    msg_type = record.get('type', '')
    if msg_type not in tLogTypes:
        return False

    result = True
    dt = record.get('dt', None)
    if dtStartFilter:
        if dt:
            result = result and (dt >= dtStartFilter)
        else:
            log.warning(u'Failed to determine the date of record %s' % str(record))
            return False
    if dtStopFilter:
        if dt:
            result = result and (dt <= dtStopFilter)
        else:
            log.warning(u'Failed to determine the date of record %s' % str(record))
            return False

    # Additional filters
    if tFilters:
        try:
            filter_result = [fltr(record) for fltr in tFilters]
        except:
            log.fatal(u'Error while checking filters')
            return False

        if sFilterLogic == AND_FILTER_LOGIC:
            result = result and all(filter_result)
        elif sFilterLogic == OR_FILTER_LOGIC:
            result = result and any(filter_result)

    return result
Example #9
 def __sanityCheckVectorLengthInStepList(self, meta, sig_list, step):
     for entry in sig_list:
         signal = meta.signals.getSignal(entry)
         if signal.role.endswith("_vec"):
             if len(sig_list[entry]) != signal.size:
                 log.fatal(
                     "sanityCheckVectorLength: signal={0:s} with value={1:s} in step={2:d} has incorrect length"
                     .format(entry, sig_list[entry], step))
                 return False
     return True
Example #10
 def add_history_record(self, isbn, visit_time):
     try:
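         # NOTE: isbn and visit_time are interpolated straight into the SQL string; parameterized queries would be safer.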
         self.cursor.execute(
             f"INSERT INTO browser_history(ISBN, time) VALUES ({isbn}, {visit_time})"
         )
         self.db.commit()
         return True
     except IntegrityError:
         log.fatal(f"Book [{isbn}] doesn't exist.")
         return False
Example #11
    def create_table(self, table_name, definition) -> bool:
        """
		:param table_name:
		:param definition:
		:return: True(success), False(failed)
		"""
        try:
            self.cursor.execute(f"CREATE TABLE {table_name} ({definition})")
        except mysql.connector.errors.ProgrammingError as e:
            log.fatal(f"Create table failed: {e.msg}")
            return False
        return True
Example #12
    def __diff_to_current(self, dependencies):
        """Calculated the diff of the parsed dependencies to the current dependencies"""
        arduino_dir = os.environ.get("ARDUINO_DIR")

        if arduino_dir is None:
            log.fatal("No ARDUINO_DIR environment variable set")

        arduino_lib_path = os.path.join(arduino_dir, "libraries")

        current_dir = os.getcwd()

        unsynced_dependencies = []

        for dependency in dependencies:
            # Move to the arduino library directory
            os.chdir(arduino_lib_path)

            dependency_path = Path(os.path.join(os.getcwd(), dependency.name))

            # Short-circuit if the path doesn't exist
            if not dependency_path.exists():
                unsynced_dependencies.append(dependency)
                continue

            # If it's a directory, we need to check the version
            if dependency_path.is_dir():
                # Look for the .warm_dependency file, which describes the current revision
                warm_path = os.path.join(str(dependency_path),
                                         ".warm_dependency")

                if not Path(warm_path).exists():
                    unsynced_dependencies.append(dependency)
                    continue
                else:
                    # Read in the file to determine revision. This is a data class serialised.
                    # Note the incautious use of eval here. This file only ever exists on the
                    # user's machine, so if they decide to inject malicious code then it's only
                    # affecting themselves.
                    with open(warm_path) as warm_properties:
                        current_dep = eval(warm_properties.read().strip())

                        if not current_dep == dependency:
                            unsynced_dependencies.append(dependency)
                            continue

        # Back to where we started
        os.chdir(current_dir)

        return unsynced_dependencies
Example #13
def run():
	arg_parser = argparse.ArgumentParser(description=app_description, epilog=app_epilog,
		prog=app_prog, formatter_class=argparse.RawDescriptionHelpFormatter)

	arg_parser.add_argument("comp", help="component name <required>")

	arg_parser.add_argument("--com", help="UART communication port <required>", required=True)
	arg_parser.add_argument("--tc", help="run only given testcases; comma delimeted, e.g. 1,2-5,7")

	arg_parser.add_argument("--verbose", help="logging verbosity (note, debug)", nargs='?', choices=['note', 'debug'])
	arg_parser.add_argument("--version", action="version", version="%(prog)s-" + app_version)

	args = arg_parser.parse_args()

	comm = args.com
	comp = args.comp

	tc_list = None
	if args.tc:
		tc_list = []
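		# Expand entries like "2-5" into individual testcase numbers.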
		for tc in args.tc.split(','):
			if '-' in tc:
				r = tc.split('-')
				b = int(r[0])
				e = int(r[1])
				for i in range(b, e + 1):
					tc_list.append(i)
			else:
				tc_list.append(int(tc))
		tc_list = sorted(set(tc_list))

	if args.verbose == 'note':
		verbose = log.Verbosity.NOTE
	elif args.verbose == 'debug':
		verbose = log.Verbosity.DEBUG
	else:
		verbose = log.Verbosity.INFO

	app_dir = os.path.dirname(exec_name)
	target_sim_path = os.path.realpath(app_dir + "/../../target_sim")
	if not os.path.isdir(target_sim_path):
		log.fatal("Directory {0:s} doesn't exist".format(target_sim_path))

	app = Impl(target_sim_path, comm, comp, tc_list, verbose)
	log.info("running hwsim")
	app.run()
Example #14
 def update_shopping_cart(self, isbn, count):
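     # NOTE: isbn and count are interpolated straight into the SQL strings below; parameterized queries would be safer.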
     if int(count) == 0:
         self.cursor.execute(
             f"DELETE FROM shopping_cart WHERE ISBN='{isbn}'")
         self.db.commit()
         return True
     elif int(count) > 0:
         self.cursor.execute(
             f"UPDATE shopping_cart SET count={count} WHERE ISBN='{isbn}'")
         self.db.commit()
         return True
     else:
         log.fatal(
             f"Update shopping_cart for user [{self.user_id}: {self.user_name}] failed, get count={count}"
         )
         return False
Example #15
    def __init__(self, run_dir: str, run_description: str, problem_type: str,
                 load_checkpoint: bool, model: nn.Module,
                 optimizer: optim.Optimizer, loss: nn.Module,
                 lr_scheduler: optim.lr_scheduler._LRScheduler,
                 metrics: List[Tuple[str,
                                     Callable[[torch.Tensor, torch.Tensor],
                                              Any]]], cudnn_autotune: bool):
        # Enable autotuning in CUDNN. Note that it is not a good idea to enable
        # this when input sizes or network structure may be different iteration
        # to iteration.
        torch.backends.cudnn.benchmark = cudnn_autotune
        run_dir = os.path.expanduser(run_dir)
        assert os.path.isdir(run_dir)
        self.run_dir = run_dir
        self.devices = [
            torch.device("cuda:{}".format(d))
            for d in range(torch.cuda.device_count())
        ]
        self.model = model
        if len(self.devices) > 1 and not isinstance(model, nn.DataParallel):
            log.fatal(
                "{} GPUs found, but model is not DataParallel. "
                "Wrap it into DataParallel for greater throughput.".format(
                    torch.cuda.device_count()))
        self.optimizer = optimizer
        self.loss_function = loss
        self.lr_scheduler = lr_scheduler  # May be None
        self.metrics = metrics
        self.epoch = 0
        self.global_step = 0
        self.best_loss = sys.float_info.max
        if problem_type not in [
                "multiclass_classification", "multilabel_classification"
        ]:
            raise ValueError(
                "Unsupported problem type: {}".format(problem_type))
        self.problem_type = problem_type  # E.g. "multilabel_classification"
        self.run_description = run_description

        self.checkpoint_path = os.path.join(self.run_dir, CHECKPOINT_FILE_NAME)
        self.best_checkpoint_path = os.path.join(self.run_dir,
                                                 BEST_CHECKPOINT_FILE_NAME)
        self.average_data_duration = performance.MovingAverage(32)
        self.average_compute_duration = performance.MovingAverage(32)
        # Checkpoint reload, if requested.
        if load_checkpoint:
            self._load_checkpoint()
Example #16
 def query(self, query_dn, rrtype='A'):
     ns = self.ns_cycle.next()
     self._query_timing(query_dn, rrtype, ns)
     while True:
         # XXX
         # need to block signals because dnspython doesn't handle EINTR
         # correctly
         log.logger.block_signals()
         res = self._sendquery(query_dn, ns, rrtype)
         log.logger.unblock_signals()
         if not isinstance(res, N3MapError):
             ns.retries = 0
             return res
         if isinstance(res, TimeOutError):
             ns.add_timeouterror(self.max_retries)
             ns = self.ns_cycle.next()
             continue
         if isinstance(res, QueryError):
             log.fatal("received bad response from server: ", str(q.ns))
Example #17
	def __loadSignalMap(self, fpath):
		with open(fpath) as map_file:
			line = map_file.readline()

		signals = line.split(' ')
		sig_map = []
		hiz_map = []
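		# Each entry has the form "<index>-<name>:<size>", with an extra ":<validation pin>" for HI-Z ("Z...") signals.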
		for s in range(len(signals)):
			e = signals[s].split(':')
			n = e[0].split("-")
			sig_map.append((int(n[0]), n[1], int(e[1])))
			if n[1][0] == 'Z':
				if len(e) == 3:
					hiz_map.append((int(e[0].split("-")[0]), int(e[2])))
				else:
					log.fatal("HI-Z signal \"{0:s}\" do not have validation pin defined".format(n[1]))

		return sig_map, hiz_map
Example #18
	def apply_constraint_value(self, property_names: [str], values=None):
		"""
		:param property_name:str of property_name(column name)
		:param values:
		:return: condition for property_name
		"""
		if values is None:
			return self
		try:
			self._where_check()
			if not isinstance(values, Iterable) or type(values) == str:
				values = [values]
			if not isinstance(property_names, Iterable) or type(property_names) == str:
				property_names = [property_names]
			for property_name in property_names:
				cons = ""
				for value in values:
					if type(value) == int or type(value) == float:
						cons += f" {property_name}={value} OR"
					else:
						cons += f" {property_name}='{value}' OR"
				cons = re.sub(r"^ *OR *", "", cons)
				cons = re.sub(r" *OR *$", "", cons)
				self.constraints += f" AND {cons} "

		except ValueError as e:
			self._revert_where_check()
			log.fatal(f"Invalid value constraint for {property_name}, get {values}.\nError: {e}")
		finally:
			return self
Example #19
    def purchase(self, isbn, purchase_time, count):
        """
		Add this record to book_e_shop.shopping_history
		Add count to book_info.sold_count
		:param isbn:
		:param purchase_time:
		:param count:
		:return: True | False
		"""
        try:
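            # NOTE: isbn, purchase_time and count are interpolated straight into the SQL strings; parameterized queries would be safer.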
            # check storage first
            e_shop_cursor.execute(
                f"SELECT storage_count FROM book_info WHERE ISBN='{isbn}'")
            storage_count = e_shop_cursor.fetchone()[0]
            if int(storage_count) < int(count):
                log.fatal(
                    f"Not enough storage for {isbn}: found {storage_count}, require {count}"
                )
                return False
            # check end
            self.cursor.execute(
                f"INSERT INTO purchase_history(ISBN, time, count) VALUES ({isbn}, {purchase_time}, {count})"
            )
            self.db.commit()
            self.cursor.execute(f"DELETE FROM shopping_cart WHERE ISBN={isbn}")
            self.db.commit()
            e_shop_cursor.execute(
                f"INSERT INTO shopping_history(isbn, count, time) VALUES({isbn}, {count}, {purchase_time})"
            )
            book_e_shop.commit()
            e_shop_cursor.execute(
                f"UPDATE book_info SET sold_count=sold_count+{count} WHERE ISBN={isbn}"
            )
            book_e_shop.commit()
            e_shop_cursor.execute(
                f"UPDATE book_info SET storage_count=storage_count-{count} WHERE ISBN={isbn}"
            )
            book_e_shop.commit()
            return True
        except mysql.connector.errors.IntegrityError:
            log.fatal(f"Book [{isbn}] not exist")
            return False
Example #20
 def add_shopping_cart(self, isbn, add_time):
     try:
         self.cursor.execute(
             f"INSERT INTO shopping_cart(isbn, count, time) VALUES ({isbn}, 1, {add_time})"
         )
     except mysql.connector.errors.IntegrityError as e:
         if e.errno == 1452:
             log.fatal(f"Book [{isbn}] doesn't exist.")
             return False
         # Any other integrity error means the book is already in the cart, so bump its count.
         self.cursor.execute(
             f"SELECT count FROM shopping_cart WHERE ISBN='{isbn}'")
         count = self.cursor.fetchone()[0]
         self.cursor.execute(
             f"UPDATE shopping_cart SET count={count + 1} WHERE ISBN='{isbn}'"
         )
     # A "return True" inside "finally" would override the "return False" above,
     # so commit and return only on the success paths.
     self.db.commit()
     return True
Example #21
def compile(protocol, args):
    """Compile C-language output"""
    if args.out == '-':
        outfile = sys.stdout
    else:
        try:
            outfile = open(args.out, 'w')
        except OSError:
            # open() raises on failure rather than returning a falsy value.
            fatal('compile', f'Could not open {args.out}')

    outfile.write(
        f'/** Generated by XMLProtoc {joinstr(".", *__version__)} on {isonow()} */\n'
    )  # noqa

    if args.prepend:
        outfile.write(open(args.prepend, 'r').read())

    protocol.accept(MessageDefinePrinterVisitor(outfile))

    if args.append:
        outfile.write(open(args.append, 'r').read())
Example #22
def parse_msg_record(line, encoding=DEFAULT_ENCODING):
    """
    Parse one line of the program's message log file.
    @param line: The log-file line currently being processed.
    @return: {'dt': Registration time as datetime.datetime,
              'type': Message type such as INFO, WARNING, etc.,
              'text': Message text,
              'short': Short message text limited to MSG_LEN_LIMIT}
    """
    dt_txt = line[:20].strip()
    # log.debug('Data: %s' % dt_txt)
    try:
        dt = datetime.datetime.strptime(dt_txt, DATETIME_LOG_FMT)
    except:
        log.fatal(u'Error parsing log time <%s>' % dt_txt)
        raise

    msg_type = get_msg_log_type(line)
    msg = line[21 + len(msg_type):].strip()
    short_msg = u''
    # Convert everything to unicode
    try:
        if isinstance(msg, str):
            msg = unicode(msg, encoding)
    except:
        log.fatal(u'Error converting string to unicode')
        msg = u''
    try:
        if isinstance(msg, unicode):
            short_msg = msg[:MSG_LEN_LIMIT] + (
                u'...' if len(msg) > MSG_LEN_LIMIT else u'')
    except:
        log.fatal(u'Error converting string to unicode')
    return dict(dt=dt, type=msg_type, text=msg, short=short_msg)
Example #23
	def __init__(self, target_sim_path, comm, comp, tc, verbose):
		log.setVerbose(verbose)
		self.tc = tc
		self.failed_testcases = 0
		self.device_info = None
		self.communication = Communication(comm, self)

		self.comp = comp
		self.target_sim_path = target_sim_path

		self.metadata_file_path = target_sim_path + "/" + comp + ".mi"
		self.map_file_path = target_sim_path + "/" + comp + ".map"
		self.def_vector_path = target_sim_path + "/" + comp + "_df.vec"

		if not os.path.exists(self.metadata_file_path):
			log.fatal("Couldn't find metadata file: " + self.metadata_file_path)

		if not os.path.exists(self.def_vector_path):
			log.fatal("Couldn't find default vector file: " + self.def_vector_path)

		if not os.path.exists(self.map_file_path):
			log.fatal("Couldn't find map file: " + self.map_file_path)
Example #24
def get_records_log_file(sLogFileName,
                         tLogTypes=LOG_TYPES,
                         dtStartFilter=None,
                         dtStopFilter=None,
                         tFilters=(),
                         sFilterLogic=AND_FILTER_LOGIC,
                         encoding=DEFAULT_ENCODING):
    """
    Read messages of the specified types from the program's message log file.
    @param sLogFileName: Full name of the log file.
    @param tLogTypes: Tuple/list of message types.
    @param dtStartFilter: Start date/time of the time filter.
        If not defined, selection starts from the beginning of the file.
    @param dtStopFilter: End date/time of the time filter.
        If not defined, selection continues to the end of the file.
    @param tFilters: Tuple/list of additional filter methods.
        Filter methods are given as lambdas or functions that take the
        record dictionary and return True if the record is selected,
        False if it is not.
    @param sFilterLogic: How the additional filters are combined:
        AND - the record is selected only if all filters pass,
        OR - it is enough for one filter to pass.
    @param encoding: Encoding of the log file.
    @return: List of dictionaries of the form:
        [{'dt': Registration time as datetime.datetime,
          'type': Message type such as INFO, WARNING, etc.,
          'text': Message text,
          'short': Short message text limited to MSG_LEN_LIMIT}, ...]
    """
    if not sLogFileName:
        log.warning(u'No program message log file defined for reading')
        return list()

    if not os.path.exists(sLogFileName):
        log.warning(u'Program message log file <%s> not found' % sLogFileName)
        return list()

    log_file = None
    try:
        records = list()

        record = dict()
        log_file = open(sLogFileName, 'r')
        for line in log_file:
            # Determine whether the current line starts a new message or continues the previous one
            if is_new_msg(line):
                record = parse_msg_record(line, encoding=encoding)
                if check_filter_record(record, tLogTypes, dtStartFilter,
                                       dtStopFilter, tFilters, sFilterLogic):
                    records.append(record)
            else:
                if isinstance(line, str):
                    line = unicode(line, encoding)
                # record['text'] = record.get('text', u'') + LINE_SEPARATOR + line
                record['text'] = record.get('text', u'') + line
                record['short'] += (u'...' if not record.get(
                    'short', u'').endswith(u'...') else u'')

        log_file.close()
        return records
    except:
        if log_file:
            log_file.close()
        log.fatal(u'Error reading records from program message log file <%s>' % sLogFileName)
    return list()
Example #25
    if args["doubleVerbose"]:
        log.default.level_console = log.VERB_DEBUG
        log.default.level_full = True

    if args["verbose"]:
        log.default.level_console = log.VERB_INFO

    log.debug(args)

    valid = True
    if args["command"] == "check-file":
        valid = main(args["infile"], args["use_schema"])
        if valid:
            log.notice("Check %s: PASS" % (args["infile"]))
        else:
            log.fatal("Check %s: FAIL" % (args["infile"]))

    elif args["command"] == "check-schemas":
        valid = check_schemas(args["path"])

    elif args["command"] == "index":
        index_files(args["path"])

    elif args["command"] == "scan-index":
        scan_index(args["infile"], args["use_mntner"])

    elif args["command"] == "scan":
        import time
        log.notice("## Scan Started at %s" %(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())))
        ck = scan_files(args["path"], args["use_mntner"], args["use_file"])
        log.notice("## Scan Completed at %s" %(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())))
Example #26
    def __parse_dependencies(self):
        """Parse the dependency file and return a list of all found dependencies"""
        # Look for dependencies.warm in current directory
        log.info("Looking in the current directory for dependencies.warm")

        file_list = [
            f for f in os.listdir('.')
            if os.path.isfile(f) and f == 'dependencies.warm'
        ]
        dep_file = file_list[0] if file_list else None

        if dep_file is None:
            log.fatal(
                "Can't find dependencies.warm in the current directory - is it named properly?"
            )

        log.info("Found dependency file")

        all_deps = []

        current_working_dir = os.getcwd()

        # Use a temporary directory as the outer context manager so that the same
        # directory is used for all git repo resolutions
        try:
            with TemporaryDirectory() as tempdir:
                with open(dep_file) as f:
                    for line_number, line in enumerate(f):
                        # If the line is a comment, skip it
                        if line.strip().startswith('//'):
                            continue

                        # The format of a dependency is as follows:
                        #
                        #                   windykeyboards/butt-in: 1.0
                        # [org or owner name] / [repo name] : [tag | commit hash | branch | plus]
                        #
                        # The following reads each dependency, detects version type, and constructs a
                        # named tuple describing the dependency.
                        try:
                            owner = line.strip().split('/')[0]
                            repo_name = line.strip().split('/')[1].split(
                                ':')[0].strip()
                            raw_version = line.strip().split(':')[1].strip()
                        except IndexError:
                            log.warn(
                                "Malformed dependency on line {line}".format(
                                    line=line_number + 1))
                            continue

                        # In future we can support more than just github. For now, hardcode a git https url
                        git_url = "https://github.com/{owner}/{repo}.git".format(
                            owner=owner, repo=repo_name)

                        parsed_version = self.__parse_version(
                            git_url, raw_version, tempdir)

                        if parsed_version is None:
                            log.warn("Malformed version on line {line}".format(
                                line=line_number + 1))
                            continue

                        version_type = parsed_version["type"]
                        version = parsed_version["version"]
                        commit_hash = parsed_version["commit_hash"]

                        all_deps.append(
                            Dependency(name=repo_name,
                                       git_url=git_url,
                                       version_type=version_type,
                                       version=version,
                                       current_hash=commit_hash))
        except PermissionError as e:
            log.warn(
                "Deletion of tempdir failed - this is not a problem, likely a Windows issue"
            )

        # Change back to the original working directory in case parsing changed the working dir
        os.chdir(current_working_dir)

        log.info("Found {0} dependencies for the current project".format(
            len(all_deps)))
        return all_deps
Example #27
def _train(directory: Text, model_name: Text, cuda: bool, num_threads: int,
           num_workers: int, spawn_method: Text, output: Optional[BinaryIO],
           cache_file: Optional[Text], **kwargs) -> None:
    # configure PyTorch's OpenMP
    log.info(f'Number of threads: {num_threads}')
    _GLOBAL_INITARGS.num_threads = num_threads
    _GLOBAL_INITARGS.cache_file = cache_file
    global_initialize(_GLOBAL_INITARGS)

    data_folder = Path(directory)
    assert data_folder.is_dir(), 'Invalid data folder'

    if cuda and cache_file is not None:
        log.warn('Disk cache file may not be compatible with CUDA tensors.')

    model, model_info = get_model(model_name, cuda, kwargs)

    # preload data into memcache
    full_data_path = data_folder / 'train.csv'
    if full_data_path.exists():
        full_data = util.load_csv(full_data_path)
        log.info('Preparing data...')
        for smiles in full_data.keys():
            _ = model.process(smiles, **kwargs)

    # process folds
    folds = []
    roc_record = []
    prc_record = []
    for fold in sorted(data_folder.iterdir()):
        if not fold.is_dir():
            log.debug(f'Ignored "{fold}".')
            continue
        folds.append(fold)

    if output is not None and num_workers > 1:
        log.fatal('Attempt to save multiple instances of model.')

    # multiprocessing configuration
    if num_workers == 1:
        model = build_model(model_info)
        for fold in folds:
            roc_auc, prc_auc = process_fold(model, fold, **kwargs)
            roc_record.append(roc_auc)
            prc_record.append(prc_auc)

        # saving model parameter
        if output is not None:
            log.info('Saving model...')
            model.save_model(output)
    else:
        log.info(f'Number of workers: {num_workers}')

        # PyTorch's OpenMP seems to be incompatible with the "fork" method
        if spawn_method == 'fork' and num_threads > 1:
            log.warn(
                'The "fork" method may result in deadlock with multithreaded training.'
            )
        if spawn_method == 'spawn':
            log.warn(
                'The "spawn" method will invalidate memory molecule caches.')

        ctx = mp.get_context(spawn_method)
        with ctx.Pool(num_workers, global_initialize,
                      (_GLOBAL_INITARGS, )) as workers:
            args_iterator = zip(folds, repeat(model_info), repeat(kwargs))
            results = workers.imap_unordered(task_wrapper, args_iterator)

            for roc_auc, prc_auc in results:
                roc_record.append(roc_auc)
                prc_record.append(prc_auc)

    # basic statistics
    roc = torch.tensor(roc_record)
    prc = torch.tensor(prc_record)
    float_fmt = lambda x: float('%.4f' % x)
    log.info(f'roc = {list(map(float_fmt, roc_record))}')
    log.info(f'prc = {list(map(float_fmt, prc_record))}')
    log.info('All folds: ROC-AUC = %.3f±%.3f, PRC-AUC = %.3f±%.3f' %
             (roc.mean(), roc.std(), prc.mean(), prc.std()))
Example #28
class AggressiveQueryProvider(QueryProvider):
    def __init__(self, 
                 ns_list, 
                 timeout,
                 max_retries,
                 stats=None,
                 query_interval=None,
                 num_threads=1):
        super(AggressiveQueryProvider,self).__init__(
                 ns_list, 
                 timeout,
                 max_retries,
                 stats,
                 query_interval)
        self._current_queryid = 0
        self._active_queries = {}
        self._results = {}
        self._query_queue = Queue.Queue()
        self._result_queue = Queue.Queue()
        self._querythreads = []
        self._start_query_threads(num_threads)

    def _start_query_threads(self,num=1):
        for i in xrange(num):
            qt = QueryThread(self._query_queue, self._result_queue)
            self._querythreads.append(qt)
            qt.start()

    def stop(self):
        for i in xrange(len(self._querythreads)):
            self._query_queue.put(None)
        for qt in self._querythreads:
            qt.join()

    def _gen_query_id(self):
        self._current_queryid += 1
        return self._current_queryid

    def _sendquery(self, query):
        self.stats['queries'] += 1
        log.debug2('query: ', query.query_dn, '; ns = ', query.ns, '; rrtype = ', query.rrtype)
        self._active_queries[query.id] = query
        self._query_queue.put(query)
        return query.id
    
    def _checkresult(self, qid, res):
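        # Store the result of a finished query, or retry on timeout via the next nameserver.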
        q = self._active_queries[qid]
        if not isinstance(res, N3MapError):
            q.ns.retries = 0
            self._results[qid] = res
            del self._active_queries[qid]
            return
        try:
            raise res
        except TimeOutError:
            try:
                q.ns.add_timeouterror(self.max_retries)
                q.ns = self.ns_cycle.next()
                self._sendquery(q)
            except TimeOutError as e:
                del self._active_queries[qid]
                raise e
        except QueryError:
            log.fatal("received bad response from server: ", str(q.ns))
Example #29
def gen_txt_file(sTxtTemplateFilename, sTxtOutputFilename, dContext=None, output_encoding=None):
    """
    Generate a text file from a template.
    @param sTxtTemplateFilename: The template - a text file.
    @param sTxtOutputFilename: Name of the output text file.
    @param dContext: The context.
        Any dictionary-like structure can serve as the context.
        By default the context is the local namespace of the config module.
    @param output_encoding: Code page of the resulting file.
        If not defined, the code page remains the same as the template's.
    @return: True - generation succeeded,
        False - generation failed.
    """
    template_file = None
    output_file = None

    template_filename = os.path.abspath(sTxtTemplateFilename)
    if not os.path.exists(template_filename):
        log.warning(u'Template file for text file generation <%s> not found' % template_filename)
        return False

    # Read the template from the file
    try:
        template_file = open(template_filename, 'r')
        template_txt = template_file.read()
        template_file.close()
    except:
        if template_file:
            template_file.close()
        log.fatal(u'Error reading template from file <%s>' % template_filename)
        return False

    try:
        # Determine the code page of the text
        template_encoding = strfunc.get_codepage(template_txt)
        log.debug(u'Template code page <%s>' % template_encoding)

        # The template must be converted to unicode before filling
        template_txt = unicode(template_txt, template_encoding)
    except:
        log.fatal(u'Error converting template text to Unicode')
        return False

    # Generate text from the template
    gen_txt = gen(template_txt, dContext)
    if isinstance(gen_txt, unicode):
        # The text must be re-encoded before writing
        if output_encoding is None:
            gen_txt = gen_txt.encode(template_encoding)
        else:
            gen_txt = gen_txt.encode(output_encoding)

    # Write the text to the resulting output file
    output_filename = os.path.abspath(sTxtOutputFilename)
    try:
        output_path = os.path.dirname(output_filename)
        if not os.path.exists(output_path):
            log.info(u'Creating folder <%s>' % output_path)
            os.makedirs(output_path)

        output_file = open(output_filename, 'w+')
        output_file.write(gen_txt)
        output_file.close()
        # Extra check that the output file exists
        return os.path.exists(output_filename)
    except:
        if output_file:
            output_file.close()
        log.fatal(u'Error generating text file <%s> from template.' % output_filename)
    return False