Example #1
 def get_open_trades(cls):
     """Return type: Instance of <Trades> or None
     Get info about all open trades
     """
     #Log.write('"oanda.py" get_open_trades(): Entering.')
     trades_oanda = cls.fetch('{}/v3/accounts/{}/openTrades'
         .format(Config.oanda_url,str(Config.account_id))
         )
     if trades_oanda == None:
         Log.write('"oanda.py" get_open_trades(): Failed to get trades from Oanda.')
         return None
     else:
         ts = Trades()
         for t in trades_oanda['trades']: 
             # format into a <Trade>
             ts.append(Trade(
                 units=t['initialUnits'],
                 broker_name = cls.__str__(),
                 instrument = Instrument(Instrument.get_id_from_name(t['instrument'])),
                 stop_loss = t['stopLossOrder']['price'],
                 strategy = None,
                 take_profit = t['takeProfitOrder']['price'],
                 trade_id = t['id']
             ))
         return ts
Example #2
 def __call__(self, parser, namespace, values, option_string=None):
     Log.cd('%r %r %r' % (namespace, values, option_string))
     # sync = sys.modules['__main__']  # http://stackoverflow.com/
     # questions/13181559#comment17940192_13181615
     setattr(namespace, self.dest, values)
     # print sync.Sync.sync_client
     Sync.newRequest(host=namespace.host, repo=values)
Example #3
 def do_set_property(self, property, value):
     if property.name == "audiorate":
         caps = gst.caps_from_string("audio/x-raw-int, rate=%d" % (value))
         self.capsfilter.set_property("caps", caps)
         self.audiorate_property = value
     else:
         Log.warning("audioresample unknown property %s" % property.name)
Example #4
def plugin_loaded():
    global watchdog, settings
    settings = load_settings()
    Log._set_verbosity(settings.verbosity)
    StackIDEManager.configure(settings)
    Win.show_popup = settings.show_popup
    watchdog = StackIDEWatchdog()
Example #5
 def reset(cls):
     """
     Kill all instances, and forget about previous notifications.
     """
     Log.normal("Resetting StackIDE")
     StackIDEManager.kill_all()
     reset_complaints()
Example #6
        def _generate_arguments_list(self, job_dict, job_name, arguments_dict):
            """
      Generate the arguments lists as follows:
      job.jvm_args, job.system_properties, job.main_entry, job.extra_args.
      """
            # prevent repeated generation for one job on different hosts/instances
            if any(getattr(self, args_type, None) != None for args_type in ARGUMENTS_TYPE_LIST):
                return

            if not job_dict.has_key("arguments"):
                Log.print_critical(
                    "The job %s must be configured with the `arguments` section."
                    " Please check your configuration file." % job_name
                )

            job_specific_arguments = job_dict["arguments"]
            job_common_arguments = arguments_dict[job_name]
            service_common_arguments = arguments_dict["service_common"]

            self._merge_arguments_dict(job_common_arguments, service_common_arguments)
            self._merge_arguments_dict(job_specific_arguments, job_common_arguments)

            # set job's attributes: job.jvm_args, job.system_properties, job.main_entry, job.extra_args
            for args_type in ARGUMENTS_TYPE_LIST:
                setattr(self, args_type, job_specific_arguments[args_type])
Example #7
 def test_log_type(self):
     log = Log('ACCESSLOG')
     self.assertEqual('ACCESSLOG', log.get_log_type())
     log.set_log_type('WARNINGLOG')
     self.assertEqual('WARNINGLOG', log.get_log_type())
     self.assertRaises(AttributeError, log.set_log_type, None)
     self.assertRaises(AttributeError, Log, None)
Example #8
class Shell:

  def __init__(self):
    self.builtins = Builtins(self)
    self.completion = Completion(self)
    self.history = History()
    self.javascript = Javascript()
    self.log = Log()
    self.prompt = Prompt()

  def execute(self, command):
    self.log.append(str(self.prompt) + command)
    self.history.append(command)

    if command:
      # execute builtins command
      try:
        self.builtins.execute(command.strip())
      except self.builtins.UnknownCommandError as e:
        self.log.append('websh: command not found: {0}'.format(e.command))
      except Exception as e:
        print 'Error in builtins: {0}'.format(e)

    return json.dumps({'javascript': str(self.javascript),
                       'log': str(self.log),
                       'prompt': str(self.prompt)})

  def template(self):
    # read template file
    file = open('data/template.html', 'r')
    template = string.Template(file.read())
    file.close()

    return template.substitute(log = str(self.log),
                               prompt = str(self.prompt))
Example #9
    def test_log_file(self):

        if os.path.isfile(testing) :
            os.remove(testing)
        Log.create('FILE',testing,True)
        Log.info('wololo')
        eq_(True, os.path.isfile(testing))
Example #10
    def __init__(self):
        """
        Initializes the map and whatever log file. Then initializes
        PARTICLE_COUNT number of particles. 
        """
        # initialize the drawing figure
        plt.ion()
        self.figure = plt.figure()
        self.pic_id = 1
        plt.draw()
        plt.imshow(plt.imread('map.png'))
        plt.axis([0,800,0,800])
        self.ax = self.figure.add_subplot(111)
        self.line, = plt.plot([], [], 'g.', markersize=5)

        # get data
        self.wean_map = map_py.Map('../data/map/wean.dat')
        log_file = Log('../data/log/robotdata3.log')
        self.log_entries = log_file.iterator()

        # initialize uniform random particles across all open cells
        self.open_cells = self.wean_map.open_cells()
        self.particles = []
        for i in range(PARTICLE_COUNT):
            self.particles.append(self.create_random())
Example #11
 def get_trade(cls, trade_id):
     """Returns: <Trade> or None
     Get info about a particular trade.
     """
     trade_info = cls.fetch(
         '{}/v3/accounts/{}/trades/{}'.format(
             Config.oanda_url,
             str(Config.account_id),
             str(trade_id)
         )
     )
     try:
         trade = trade_info['trade']
         sl = None
         tp = None
         if 'stopLossOrder' in trade:
             sl = trade['stopLossOrder']['price']
         if 'takeProfitOrder' in trade:
             tp = trade['takeProfitOrder']['price']
         return Trade(
             units=trade['initialUnits'],
             broker_name = cls.__str__(),
             instrument = Instrument(Instrument.get_id_from_name(trade['instrument'])),
             stop_loss = sl,
             take_profit = tp,
             strategy = None,
             trade_id = trade['id']
         )
     except Exception:
         # Oanda returns 404 error if trade closed; don't raise Exception.
         Log.write('oanda.py get_trade(): Exception:\n{}'.format(sys.exc_info()))
         Log.write('"oanda.py" get_trade(): Failed to get trade info for trade with ID ', trade_id, '.')
         return None
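The stop-loss and take-profit orders are optional keys on the trade Oanda returns, which is why the code probes for them before indexing. A minimal sketch of the same defensive access using dict.get; the payload below is made up for illustration:

trade = {
    "id": "42",
    "initialUnits": "100",
    "instrument": "EUR_USD",
    "stopLossOrder": {"price": "1.05000"},
    # no takeProfitOrder on this trade
}

sl = trade.get("stopLossOrder", {}).get("price")
tp = trade.get("takeProfitOrder", {}).get("price")
print(sl, tp)  # -> 1.05000 None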
Example #12
	def __init__(self):
		try:
			self.__conn = mdb.connect(host="localhost",user="******",passwd="root",db="reader",charset="utf8")
		except mdb.Error,e:
			log = Log()
			errmsg = traceback.format_exc()
			log.error(errmsg)
Example #13
    def _isEqual(self, sql):
        """
		return 1 means equal
		return 0 means don't have in db
		return -1 means don't equal
		"""
        try:
            db = Database()
            result = db.query(sql)

            if result is None:
                return 0
            dbAccount = result[0]["account"]
            dbPasswd = result[0]["password"]

            if self.account == dbAccount and self.passwd == dbPasswd:
                return 1
            else:
                return -1
        except:
            log = Log()
            errmsg = traceback.format_exc()
            log.error(errmsg)
        finally:
            # db is defined in the try block; can it be accessed here in finally?
            db.close()
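The closing comment asks whether db, bound inside try, is visible in finally. It is, but only if the assignment actually ran; if Database() itself raises, db.close() in finally raises a NameError. A minimal sketch of the safer shape, using a stand-in FakeDB class invented for illustration:

class FakeDB:
    # stand-in resource used only for this sketch
    def query(self, sql):
        return None
    def close(self):
        print("closed")

def is_equal_sketch(sql):
    db = None                  # bind the name before try
    try:
        db = FakeDB()
        return 0 if db.query(sql) is None else 1
    finally:
        if db is not None:     # close only if the constructor succeeded
            db.close()

print(is_equal_sketch("SELECT 1"))  # prints "closed", then 0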
Example #14
    def install(self, module_name, module_version, module_url, prefix_dir=None):
        if not self.is_osx() and not self.is_git_module(module_url) and self.is_module_osx_only(module_name, module_version, prefix_dir):
            raise RuntimeError('Cannot install %s on platform %s' % (module_url, self.get_platform()))

        cmd = self.build_npm_cmd(prefix_dir=prefix_dir)
        cmd.extend(['install', module_url])

        attempts = 0
        attempt_delay = 5
        max_attempts = 10
        try_again = True

        while try_again:
            attempts = attempts + 1
            Log.info('Attempting to install node module %s (attempt #%s)', module_name, attempts)
            result, error = self.try_install(cmd, prefix_dir)

            if result is True and error is None:
                try_again = False
            elif attempts < max_attempts:
                time.sleep(attempt_delay)
            else:
                raise error

        return os.path.join(prefix_dir, 'node_modules', module_name)
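The loop above retries a flaky install a bounded number of times with a fixed delay, re-raising the last error once the attempts run out. A self-contained sketch of that retry pattern with a stubbed installer; all names here are invented for illustration:

import time

def make_flaky_installer(failures=2):
    calls = {"n": 0}
    def try_install():
        calls["n"] += 1
        if calls["n"] <= failures:
            return False, RuntimeError("npm install failed")
        return True, None
    return try_install

try_install = make_flaky_installer()
attempts, max_attempts, attempt_delay = 0, 10, 0.1
while True:
    attempts += 1
    result, error = try_install()
    if result is True and error is None:
        print("installed after %d attempts" % attempts)
        break
    if attempts >= max_attempts:
        raise error
    time.sleep(attempt_delay)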
Example #15
def wait_for_job_stopping(service, cluster, job, host):
  '''
  Wait for a specified job to be stopped.
  '''
  while not check_job_stopped(service, cluster, job, host):
    Log.print_warning("Wait for %s on %s stopping" % (job, host))
    time.sleep(2)
Example #16
def get_local_package_path(artifact, version):
  '''
  Get the local tarball path of the package of specified artifact and version

  @param  artifact  the artifact of the package
  @param  version   the version of the package
  @return string    the full path of the tarball
  '''
  if artifact == "zookeeper":
    package_path = get_local_package_path_general(
        get_deploy_config().get_zookeeper_package_dir(),
        artifact, version)
  elif artifact == "hadoop":
    package_path = get_local_package_path_general(
        get_deploy_config().get_hadoop_package_dir(),
        artifact, version)
  elif artifact == "hbase":
    package_path = get_local_package_path_general(
        get_deploy_config().get_hbase_package_dir(),
        artifact, version)
  elif artifact == "impala-shell" or artifact == "impala":
    package_path = get_local_package_path_general(
        get_deploy_config().get_imapala_package_dir(),
        artifact, version)
  else:
    Log.print_critical("Unknow artifact: %s" % artifact)
  return package_path
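As a design note, the elif chain maps an artifact name to a package-directory getter; the same mapping can be expressed as a dict lookup. A minimal sketch with stand-in paths (the real code calls get_deploy_config() accessors instead):

package_dirs = {
    "zookeeper": "/pkgs/zookeeper",
    "hadoop": "/pkgs/hadoop",
    "hbase": "/pkgs/hbase",
    "impala": "/pkgs/impala",
    "impala-shell": "/pkgs/impala",
}

def get_package_dir(artifact):
    try:
        return package_dirs[artifact]
    except KeyError:
        raise ValueError("Unknown artifact: %s" % artifact)

print(get_package_dir("hbase"))  # -> /pkgs/hbase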
Example #17
    def get_play(self):
        Log.print('ai: %s play options: %s %s' % (self.name, Cards.name(self.cards[0]), Cards.name(self.cards[1])))
        self.observer.print_state('ai')
        ret = self._get_required_play()
        if not ret:
            cards = sorted(self.cards)
            card = cards[0]
            other_card = cards[1]

            ret = {'card': card}
            if card == Cards.GUARD:
                (player, card, certainty) = self._most_likely(exclude_card=Cards.GUARD)
                if other_card == Cards.HANDMAIDEN and certainty < 1:
                    ret['card'] = Cards.HANDMAIDEN
                else:
                    ret['target'] = player.number
                    ret['challenge'] = card
            elif card == Cards.PRIEST:
                (player, card, certainty) = self._least_likely()
                if other_card == Cards.HANDMAIDEN:
                    ret['card'] = Cards.HANDMAIDEN
                else:
                    ret['target'] = player.number
            elif card == Cards.BARON:
                (player, certainty) = self._most_likely_less_than(other_card)
                if other_card == Cards.HANDMAIDEN and certainty < 1:
                    ret['card'] = Cards.HANDMAIDEN
                else:
                    ret['target'] = player.number
            elif card in (Cards.PRINCE, Cards.KING):
                (player, value) = self._highest_expected_value()
                ret['target'] = player.number

        return ret
Example #18
    def build_dependency_tree(module_path):
        """
        given the path of a node module, calculate a dependency tree.
        each node in the tree represents a dependency, and contains
        name and version of the dependency.

        this should be run on a "fully materialized" module path, that is
        a path containing an npm module which has been "installed" and contains its
        dependencies on disk in descendent node_modules dirs
        """
        dep_map = {}
        dep_map[module_path] = tree()

        def get_deps_tree(check_path):
            if check_path == '/':
                raise RuntimeError('Unable to locate dep tree')
            if dep_map.get(check_path, None) != None:
                return dep_map[check_path]
            return get_deps_tree(os.path.dirname(check_path))

        for root, _, _ in os.walk(module_path):
            deps = get_deps_tree(root)

            try:
                stat = Npm.stat_module(root)
                new_deps = deps['%s@%s' % stat]
                dep_map[root] = new_deps
            except IOError:
                Log.verbose('%s is not a valid Node module. Skipping' % root)

        return dep_map[module_path]
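get_deps_tree walks up parent directories until it finds a path already registered in dep_map, which is what lets nested node_modules inherit the tree of their nearest ancestor. A self-contained sketch of that parent lookup; paths and values are illustrative:

import os

dep_map = {"/project": "root deps"}

def nearest_registered_parent(path):
    if path == "/":
        raise RuntimeError("Unable to locate dep tree")
    if path in dep_map:
        return dep_map[path]
    return nearest_registered_parent(os.path.dirname(path))

print(nearest_registered_parent("/project/node_modules/a/node_modules/b"))
# -> root deps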
Example #19
    def main(self):

        #settings passed to tornado app
        tornado_settings = {
            "template_path": settings.template_path,
            "static_path": settings.static_path,
            "cookie_secret": settings.cookie_secret,
            "login_url": settings.login_url,
        }

        #init a logger
        self.init_logging(settings.log)

        #routes
        routes = self.init_routes()

        self._application = web.Application(routes,**tornado_settings)

        http_server = httpserver.HTTPServer(self._application)
        http_server.listen(settings.port)

        Log.info("Ready and listening")

        ioloop = tornado.ioloop.IOLoop().instance()
        autoreload.start(ioloop)
        try:
            ioloop.start()
        except KeyboardInterrupt:
            pass
Example #20
 def do_set_property(self, property, value):
     if property.name == "width":
         self.width = value
         self._set_caps()
     elif property.name == "height":
         self.height = value
         self._set_caps()
     elif property.name == "xposition":
         self.x_position = value
         self._set_B_pad_property("xpos", self.x_position)
     elif property.name == "yposition":
         self.y_position = value
         self._set_B_pad_property("ypos", self.y_position)
     elif property.name == "enabled":
         self.enabled = value
         if self.enabled:
             self._set_B_pad_property("zorder", 10)
         else:
             self._set_B_pad_property("zorder", 0)
     elif property.name == "position":
         self.set_selected_position(value)
     elif property.name == "a-active":
         self.a_active = value
         self._set_active_a(value)
     elif property.name == "b-active":
         self.b_active = value
         self._set_active_b(value)
     else:
         Log.warning('PictureInPicture unknown property %s' % property.name)
Example #21
class Controller(Parent):

    def models(self):
        self.logs = Log(self.config)

    def timestamps(self, msg, resp):
        logs = self.logs.mongoQuery({}, {'EVENTTIMESTAMP': 1})
        response = {}
        for log in logs:
            response[str(log._id)] = log.to_hash()
        resp.respond(response)

    def tags(self, msg, resp):
        logs = self.logs.mongoQuery({}, {'METADATA': 1})
        histogram = {'TAGS': {}}
        for log in logs:
            if 'TAGS' in log.METADATA:
                for row in csv.reader([log.METADATA['TAGS']]):
                    for tag in row:
                        if tag not in histogram['TAGS']:
                            histogram['TAGS'][tag] = {'NAME': tag, 'COUNT': 1}
                        else:
                            histogram['TAGS'][tag]['COUNT'] = histogram[
                                'TAGS'][tag]['COUNT'] + 1

        resp.respond(histogram)
Example #22
def get_package_info(args, artifact, cluster):
  if (cluster.package_name and cluster.revision and cluster.timestamp):
    check_cluster_version(cluster, cluster.package_name)
    package_name = cluster.package_name
    revision = cluster.revision
    timestamp = cluster.timestamp
  elif (args.package_name and args.revision and args.timestamp):
    check_cluster_version(cluster, args.package_name)
    package_name = args.package_name
    revision = args.revision
    timestamp = args.timestamp
  else:
    package_info = get_latest_package_info(artifact,
      artifact + "-" + cluster.version + ".tar.gz")
    if package_info:
      package_name = package_info.get('package_name')
      revision = package_info.get('revision')
      timestamp = package_info.get('timestamp')
    else:
      Log.print_critical("No package found on package server of %s" %
        artifact + "-" + cluster.version + ".tar.gz")

  return {
    "package_name": package_name,
    "revision": revision,
    "timestamp": timestamp,
  }
Example #23
 def get_status(self):
     """Fetch system information (stats)."""
     try:
         sysinfo = SystemStatus(update=True)
         return sysinfo
     except Exception as excpt:
         Log.exception("Error getting System Status: %s.", excpt)
Example #24
 def get_env(self):
     now = datetime.datetime.now()
     uptime = now - self.launch_time
     days = uptime.days
     minutes, seconds = divmod(uptime.seconds, utilities.SECONDS_PER_MINUTE)
     hours, minutes = divmod(minutes, utilities.MINUTES_PER_HOUR)
     s = ('''
         Smart Module Status
           Software Version v{version}
           Running on: {platform}
           Encoding: {encoding}
           Python Information
            - Executable: {executable}
            - v{sys_version}
            - location: {executable}
           Timestamp: {timestamp}
           Uptime: This Smart Module has been online for:
           {days} days, {hours} hours, {minutes} minutes and {seconds} seconds.
     ''').format(
         version=utilities.VERSION,
         platform=sys.platform,
         encoding=sys.getdefaultencoding(),
         executable=sys.executable,
         sys_version=sys.version.split()[0],
         timestamp=now.strftime('%Y-%m-%d %H:%M:%S'),
         days=days,
         hours=hours,
         minutes=minutes,
         seconds=seconds,
     )
     s = utilities.trim(s) + '\n'
     try:
         self.comm.send("ENV/RESPONSE", s)
     except Exception as excpt:
         Log.exception("Error getting environment data: %s.", excpt)
Example #25
class SytemInputFileReader(threading.Thread):

    def __init__(self, decode_work_queue):
        threading.Thread.__init__(self)
        self._stop = threading.Event()
        self.decode_work_queue = decode_work_queue

        self.logger = Log().getLogger()
        self.logger.debug("SytemInputFileReader(decode_work_queue:'%s')" % (decode_work_queue))

    def run(self):
        self.logger.debug("Thread started.")
        for line in sys.stdin:
            line = line.strip()
            if (line):
                self.logger.debug("Got new file to decode from system.in. Add '%s' to the decode work queue." % (line))
                self.decode_work_queue.put(line)
        
        self.logger.debug("Put the last None element to the queue.")
        self.decode_work_queue.put(None)

        self._stop.set()
        self.logger.debug("Thread stopped.")
            
    def stop (self):
        self._stop.set()
        sys.stdin.flush()

    def stopped (self):
        return self._stop.isSet()
Example #26
 def load_site_data(self):
     field_names = '''
         id
         name
         wunder_key
         operator
         email
         phone
         location
         longitude
         latitude
     '''.split()
     try:
         sql = 'SELECT {fields} FROM site LIMIT 1;'.format(
             fields=', '.join(field_names))
         database = sqlite3.connect(utilities.DB_CORE)
         db_elements = database.cursor().execute(sql)
         for row in db_elements:
             for field_name, field_value in zip(field_names, row):
                 setattr(self, field_name, field_value)
         Log.info("Site data loaded.")
     except Exception as excpt:
         Log.exception("Error loading site data: %s.", excpt)
     finally:
         database.close()
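The inner loop zips the selected column names with the fetched row and attaches each value as an attribute. A self-contained sketch of that pattern against an in-memory SQLite database; the table, columns and values are made up for illustration:

import sqlite3

class Site:
    pass

field_names = ["id", "name", "operator"]

database = sqlite3.connect(":memory:")
database.execute("CREATE TABLE site (id INTEGER, name TEXT, operator TEXT)")
database.execute("INSERT INTO site VALUES (1, 'Greenhouse', 'alice')")

site = Site()
sql = "SELECT {fields} FROM site LIMIT 1;".format(fields=", ".join(field_names))
for row in database.cursor().execute(sql):
    for field_name, field_value in zip(field_names, row):
        setattr(site, field_name, field_value)
database.close()

print(site.id, site.name, site.operator)  # -> 1 Greenhouse alice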
Example #27
	def close(self):
		try:
			self.__conn.close()
		except mdb.Error,e:
			log = Log()
			errmsg = traceback.format_exc()
			log.error(errmsg)
Example #28
    def __getNextSubCycle(self, startByKey=None):
        lenSubCycles = len(self.subCycles)
        key = startByKey if (startByKey is not None and
        startByKey < lenSubCycles) else 0
        if self.__lapsedSeconds == 0:
            return key

        while self.__lapsedSeconds > 0:
            subCycle = self.subCycles[key]
            if type(subCycle) is Cycle:
                subCycle.setLapsedSeconds(self.__lapsedSeconds)
                self.__lapsedSeconds = 0
                break
            if self.__lapsedSeconds > subCycle.seconds:
                self.__lapsedSeconds -= subCycle.seconds
            else:
                tempSeconds = subCycle.seconds
                subCycle.seconds -= self.__lapsedSeconds
                # Write to the log
                Log.writeLog(key, self.name, self.numRelays,
                    subCycle.mode, subCycle.seconds)
                subCycle.run(self.numRelays)
                subCycle.seconds = tempSeconds
                self.__lapsedSeconds = 0

            key = (key + 1) if key < lenSubCycles - 1 else 0
        return key
Example #29
def process_command_bootstrap(args):
  deploy_utils.check_admin_priviledge(args)
  args.update_config = True
  deploy_tool = SERVICE_DEPLOY_TOOL_MAP.get(args.service)
  if deploy_tool:
    return deploy_tool.bootstrap(args)
  Log.print_critical("Not implemented for service: %s", args.service)
Example #30
 def upload_permanent_material(self, media_type, media, title=None, introduction=None):
     '''
     Upload a permanent media asset.
     :param media_type: media type; supported types are ('image', 'voice', 'video', 'thumb')
     :param media: the media file to upload
     If the uploaded type is video, the following two extra parameters are required:
     :param title: video title
     :param introduction: video description
     :return: the JSON data returned by the WeChat server, e.g.
     {"type":"TYPE","media_id":"MEDIA_ID","created_at":123456789}
     Official API: http://mp.weixin.qq.com/wiki/5/963fc70b80dc75483a271298a76a8d59.html
     '''
     if media_type not in ('image', 'voice', 'video', 'thumb'):
         Log.error('upload media type not supported!')
         raise TypeError('upload media type not supported!')
     param = {
         'type': media_type
     }
     if media_type == 'video':
         # the video type requires two extra parameters
         param.update({
             'title': title,
             'introduction': introduction
         })
     return self.request.post(
         url='https://api.weixin.qq.com/cgi-bin/material/add_material',
         params=param,
         files={
             'media': (media.name, media, media.name.split('.')[-1])
         }
     )
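The call above ends up as a multipart POST with a media file part plus query parameters. A hedged sketch of an equivalent request with the requests library, assuming an image upload; the access_token parameter and file path are placeholders, and the exact contract is defined by the linked WeChat API page:

import requests

def upload_image_sketch(access_token, path):
    with open(path, "rb") as media:
        response = requests.post(
            "https://api.weixin.qq.com/cgi-bin/material/add_material",
            params={"access_token": access_token, "type": "image"},
            files={"media": (path, media)},
        )
    return response.json()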
Example #31
 def toJson(self):
     try:
         return json.dumps(self.items, sort_keys=True)
     except Exception as e:
         Log.wError('error when converting result to json: %s' % e)
         raise Exception(e)
Example #32
 def GetMetrics(self, request, context):
     Log.debug("Came in GetMetrics")
     return self.pyinstance.get_metrics()
Example #33
 def ResetMetrics(self, request, context):
     Log.debug("Came in ResetMetrics")
     self.pyinstance.reset_metrics()
     return request
Example #34
 def GetFunctionStatus(self, request, context):
     Log.debug("Came in GetFunctionStatus")
     return self.pyinstance.get_function_status()
Example #35
import peewee
from petition import Petition
from log import Log

if __name__ == '__main__':
    try:
        Petition.drop_table()
        Petition.create_table()
        Log.drop_table()
        Log.create_table()
    except peewee.OperationalError:
        print 'Tabela Petition ja existe!'
Example #36
def main():
    default_file = "krono.sqlite"

    ap = argparse.ArgumentParser()
    ap.add_argument("-a", "--autosave", default=60, type=int)
    ap.add_argument("-f", "--file", default=default_file)
    ap.add_argument("-i", "--interactive", action="store_true")
    ap.add_argument("-p", "--project", default="")
    ap.add_argument("-n", "--notes", default="")
    ap.add_argument("-t", "--tags", default="")
    ap.add_argument("-v", "--view", action="store_true")
    ap.add_argument("--debug", action="store_true")
    args = vars(ap.parse_args())

    if args["debug"]:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    logging.basicConfig(level=logging_level,
                        format="[%(levelname)s] %(message)s")

    filepath = os.path.abspath(args["file"])

    if args["interactive"]:
        # If interactive mode chosen, enter curses-based command line
        # interface via CLI class.
        CLI().cmdloop()
    elif args["view"]:
        # If view chosen, view using Log.view() curses interface.
        try:
            log = Log()
            log.load_db(filepath)
            log.select_all()
            log.view()
            log.unload_db()
        except Exception as e:
            logging.error(e)
    else:
        # Instantiate Log object. Create DB if necessary, else load existing.
        log = Log()
        if not os.path.isfile(filepath):
            logging.info("Creating database file {}".format(filepath))
            try:
                log.create_db(filepath)
            except Exception as e:
                logging.error(e)
        else:
            try:
                log.load_db(filepath)
                logging.info("Loaded database file {}".format(filepath))
            except Exception as e:
                logging.error(e)

        # Add new row to end of DB with current datetime as start time.
        start_time = datetime.datetime.now()
        log.add_row({
            "start": datetime_to_string(start_time),
            "project": args["project"],
            "tags": args["tags"],
            "notes": args["notes"]
        })

        # Get id of last row added. This will be used to periodically update
        # DB with new end time in Session thread, as well as at the end of
        # this function (__main__.main()).
        last_row_id = log.get_last_row_id()

        # Use lock to manage access to DB between this and Session threads.
        db_lock = threading.Lock()
        sess = Session(log,
                       last_row_id,
                       autosave_interval=int(args["autosave"]),
                       lock=db_lock)
        sess.start()

        logging.info("New session started. Press Enter to stop.")
        if sys.version_info.major < 3:
            raw_input()
        else:
            input()

        # Write current datetime as end time before exiting.
        current_datetime = datetime_to_string(datetime.datetime.now())

        try:
            db_lock.acquire()
            log.update_row(last_row_id, {"end": current_datetime})
        except Exception as e:
            logging.error(e)
        finally:
            db_lock.release()
            log.unload_db()
Example #37
keywords = args[1]
print("keywords[" + keywords + "]")

num_of_papers = 10000
path="../../data/" + keywords.replace(" ", "").replace("\"", "") + "/"
filename = "title"
timeout=30

import sys,os
if not os.path.exists(path):
	print("create directory[" + path + "]")
	os.mkdir(path)

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../lib/utils")
from log import Log as l
log = l.getLogger()
log.info("get_papers_from_IEEE.py start.")
log.info("num_of_papers["+str(num_of_papers)+"]")

from IEEEXplore import IEEEXplore as X
xplore = X()
from IEEEXplore import Search_options as s
opts = s()
opts.PerPage = 100


if num_of_papers <= 0:
	log.warning("initial num_of_papers <= 0")
	sys.exit("initial num_of_papers <= 0")

all_papers, all_papers_urls, all_urls_of_papers_with_same_authors, all_urls_of_papers_with_same_keywords, all_citing_urls, all_cited_urls, all_urls_in_conference = xplore.get_papers_by_keywords(keywords, num_of_papers, search_options=opts, path=path, filename=filename, timeout=timeout)
Example #38
import os

from flask import Flask, jsonify
from flask_login import LoginManager
from flask_marshmallow import Marshmallow
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy

from log import Log

log = Log("evolux-project").get_logger(logger_name="app")


db = SQLAlchemy()
ma = Marshmallow()
login_manager = LoginManager()


def create_app(config_name=None):
    log.info(f"Create app (config_name: {config_name})")
    if os.getenv("FLASK_CONFIG") == "production":
        log.info("Executing in PRODUCTION")
        app = Flask(__name__)
        log.info(f"Get configs from {os.getenv('FLASK_CONFIG')}")
        app.config.update(
            SECRET_KEY=os.getenv("SECRET_KEY"),
            SQLALCHEMY_DATABASE_URI=os.getenv("SQLALCHEMY_DATABASE_URI"),
            SQLALCHEMY_TRACK_MODIFICATIONS=os.getenv("SQLALCHEMY_TRACK_MODIFICATIONS"),
        )
    else:
        app = Flask(__name__, instance_relative_config=True)
Example #39
    def run(self):
        """
		Start writing the file from the buffers in this thread.
		"""
        self._is_writing_interrupted = False

        # copy back buffer
        buf_before = self.buf_before.get_copy()
        self._buf_before_length = len(buf_before) / self._bytes_per_second
        self._file.writeframes("".join(buf_before))
        buf_before = None
        self._time_written = self._buf_before_length
        Log.debug(
            self._tag, "Writen %.2f seconds from before the hotword" %
            self._buf_before_length)

        # copy forward buffer
        while True:
            if self._is_writing_interrupted:
                Log.debug(self._tag, "Interrupt detected")
                break

            bytes = self.buf_after.get()
            self._file.writeframes("".join(bytes))

            additional_time_written = (len(bytes) / self._bytes_per_second)
            Log.debug(self._tag,
                      "Written %.2f seconds" % additional_time_written)
            self._time_written += additional_time_written

            if self.buf_after.capture_stopped() and self.buf_after.length(
            ) == 0:
                break

            time.sleep(3)

        self._file.close()

        if self._is_writing_interrupted and self._delete_active_recording:
            try:
                os.remove(self.filepath)
                Log.debug(
                    self._tag,
                    "Writing of %s interrupted after %.2f seconds of audio so file was deleted"
                    % (self.filename, self._time_written))
            except OSError:
                Log.error(
                    self._tag,
                    "Writing of %s interrupted after %.2f seconds of audio, but COULDNT DELETE"
                    % (self.filename, self._time_written))
        else:
            Log.debug(
                self._tag, "Written %.2f seconds of audio in %s" %
                (self._time_written, self.filename))
            self.clean_up = True
Example #40
 def start(cls,star_radius):
     cls.terrain = [[Empty() for i in range(cls.size[1])] for j in range(cls.size[0])]
     cls.gen_star(star_radius)
     cls.gen_planets(int(Config.config['Constants']['CantPlanets']))
     cls.spawn_tests()
     Log.add('Board initialized')
Example #41
class MyPanel(wx.Panel):
    """"""
 
    #----------------------------------------------------------------------
    def __init__(self, parent):
        """Constructor"""
        wx.Panel.__init__(self, parent)

        self.number_of_buttons = 0
        self.frame = parent
        self.log = Log(self)
        self.log.AppendText('it ')

        self.log.Bind(wx.EVT_CHAR_HOOK, self.onKey)
 
        self.mainSizer = wx.BoxSizer(wx.VERTICAL)
        controlSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.widgetSizer = wx.GridSizer(2,2,3,3)
 
        self.addButton = wx.Button(self, label="Add")
        self.addButton.Bind(wx.EVT_BUTTON, self.onAddWidget)
        controlSizer.Add(self.addButton, 0, wx.CENTER|wx.ALL, 5)
 
        self.removeButton = wx.Button(self, label="Remove")
        self.removeButton.Bind(wx.EVT_BUTTON, self.onRemoveWidget)
        controlSizer.Add(self.removeButton, 0, wx.CENTER|wx.ALL, 5)
 
        self.mainSizer.Add(self.log)
        self.mainSizer.Add(controlSizer, 0, wx.CENTER)
        self.mainSizer.Add(self.widgetSizer, 0, wx.CENTER|wx.ALL, 10)
 
        self.SetSizer(self.mainSizer)
 
    #----------------------------------------------------------------------
    def onKey(self, event):
        keycode = event.GetKeyCode()
        if chr(keycode).isdigit():
            self.log.AppendText(" it's " + chr(keycode))
        event.Skip()


    #----------------------------------------------------------------------
    def onAddWidget(self, event):
        """"""
        self.number_of_buttons += 1
        label = "Button %s" %  self.number_of_buttons
        name = "button%s" % self.number_of_buttons
        d = Document('test', 'it was the best of times it was the worst of times')
        c = Channel(self, d, self.log)
        #new_button = wx.Button(self, label=label, name=name)
        self.widgetSizer.Add(c, 0, wx.ALL, 5)
        self.frame.fSizer.Layout()
        self.frame.Fit()
 
    #----------------------------------------------------------------------
    def onRemoveWidget(self, event):
        """"""
        if self.widgetSizer.GetChildren():
            self.widgetSizer.Hide(self.number_of_buttons-1)
            self.widgetSizer.Remove(self.number_of_buttons-1)
            self.number_of_buttons -= 1
            self.frame.fSizer.Layout()
            self.frame.Fit()
Example #42
class Runner:
    def __init__(self, agent: AbstractAgent, runs_for_probability_estimation: int = 1):
        self.logger = Log("Runner")
        self.agent = agent
        self.runs_for_probability_estimation = runs_for_probability_estimation

    def execute_train(
        self,
        current_iteration: int,
        search_suffix: str,
        current_env_variables: EnvVariables,
        _start_time: float,
        random_search: bool = False,
    ) -> Tuple[EnvPredicatePair, float, float]:
        if self.runs_for_probability_estimation == 1:
            env_predicate_pair, execution_time, regression_time = execute_train(
                agent=self.agent,
                current_iteration=current_iteration,
                search_suffix=search_suffix,
                current_env_variables=current_env_variables,
                _start_time=_start_time,
                random_search=random_search,
            )
            self.logger.debug("--------------------------------------------------: end runner execution")
            return env_predicate_pair, execution_time, regression_time
        execution_start_time = time.time()

        num_of_cpus = multiprocessing.cpu_count()
        num_of_processes_to_spawn = (
            self.runs_for_probability_estimation if num_of_cpus >= self.runs_for_probability_estimation else num_of_cpus - 1
        )
        self.logger.debug("num of processes to spawn: {}".format(num_of_processes_to_spawn))

        search_suffixes = [search_suffix + "_run_" + str(i) for i in range(self.runs_for_probability_estimation)]

        queue = Queue()
        queue_result = Queue()
        # Create worker threads
        for _ in range(num_of_processes_to_spawn):
            worker = ProbabilityEstimationWorker(
                queue=queue, queue_result=queue_result, agent=self.agent, start_time=_start_time, random_search=random_search
            )
            # Setting daemon to True will let the main thread exit even though the workers are blocking
            worker.daemon = True
            worker.start()
        # Put the tasks into the queue as a tuple
        for search_suffix in search_suffixes:
            work_to_pass = (current_iteration, search_suffix, current_env_variables)
            queue.put(work_to_pass)
        # Causes the main thread to wait for the queue to finish processing all the tasks
        queue.join()

        env_predicate_pairs = []
        execution_times = []
        regression_times = []
        while not queue_result.empty():
            env_predicate_pair, execution_time, regression_time = queue_result.get_nowait()
            env_predicate_pairs.append(env_predicate_pair)
            execution_times.append(execution_time)
            regression_times.append(regression_time)

        execution_end_time = time.time()
        # execution_time = execution_end_time - execution_start_time
        execution_time = np.asarray(execution_times).sum()
        regression_time = np.asarray(regression_times).sum()
        adequate_performance_list = []
        regression_list = []

        for env_predicate_pair in env_predicate_pairs:
            adequate_performance_list.append(env_predicate_pair.is_predicate())
            if env_predicate_pair.is_predicate():
                regression_list.append(env_predicate_pair.is_regression())

        env_predicate_pair = EnvPredicatePair(
            env_variables=current_env_variables,
            probability_estimation_runs=adequate_performance_list,
            regression_estimation_runs=regression_list,
            model_dirs=search_suffixes,
        )
        self.logger.debug("--------------------------------------------------: end runner execution")
        return env_predicate_pair, execution_time, regression_time

    def execute_test_with_callback(self, current_env_variables: EnvVariables, n_eval_episodes: int = None) -> EnvPredicatePair:
        seed = np.random.randint(2 ** 32 - 1)
        return self.agent.test_with_callback(seed=seed, env_variables=current_env_variables, n_eval_episodes=n_eval_episodes)

    def execute_test_without_callback(self, n_eval_episodes: int, model_path: str) -> Tuple[float, float]:
        seed = np.random.randint(2 ** 32 - 1)
        return self.agent.test_without_callback(seed=seed, n_eval_episodes=n_eval_episodes, model_path=model_path)

    def execute_train_without_evaluation(
        self, current_iteration: int, current_env_variables: EnvVariables, search_suffix: str = "1"
    ) -> None:
        # agent.train sets seed globally (for tf, np and random)
        seed = np.random.randint(2 ** 32 - 1)
        self.agent.train(
            seed=seed, current_iteration=current_iteration, search_suffix=search_suffix, env_variables=current_env_variables
        )
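The parallel branch relies on queue.join() to block until every task has been processed, which in turn requires each worker to call task_done() after finishing an item. A minimal self-contained sketch of that producer/worker/join pattern with threads; the real Runner uses its ProbabilityEstimationWorker and trains agents instead of squaring numbers:

import queue
import threading
import time

tasks = queue.Queue()
results = queue.Queue()

def worker():
    while True:
        item = tasks.get()
        time.sleep(0.01)       # stand-in for the actual training run
        results.put(item * item)
        tasks.task_done()      # required for tasks.join() to return

for _ in range(4):
    threading.Thread(target=worker, daemon=True).start()

for i in range(8):
    tasks.put(i)
tasks.join()                   # wait for all queued work to finish

collected = []
while not results.empty():
    collected.append(results.get_nowait())
print(sorted(collected))       # -> [0, 1, 4, 9, 16, 25, 36, 49]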
Example #43
from socket import *
import sys
from log import Log

print(format(sys.argv[0]))
log = Log('server' + format(sys.argv[1]) + ".log")

TAILLE_TAMPON = 256

if len(sys.argv) != 2:
    print("Usage: {} <port>".format(sys.argv[0]), file=sys.stderr)
    sys.exit(1)

sock = socket(AF_INET, SOCK_DGRAM)

# Bind the socket to every possible IP address of the machine
sock.bind(('', int(sys.argv[1])))
print("Serveur en attente sur le port " + sys.argv[1], file=sys.stderr)
log.write_info("Server liscten to " + sys.argv[1])

while True:
    try:
        log.write_info("Server listening...")
        # Retrieve the client's request
        requete = sock.recvfrom(TAILLE_TAMPON)
        # Extract the message and the client's address
        (mess, adr_client) = requete
        ip_client, port_client = adr_client

        print("Requête provenant de {}. Longueur = {}". \
              format(ip_client, len(mess)), file=sys.stderr)
Example #44
 def __init__(self, agent: AbstractAgent, runs_for_probability_estimation: int = 1):
     self.logger = Log("Runner")
     self.agent = agent
     self.runs_for_probability_estimation = runs_for_probability_estimation
Example #45
class Prediction(Pipeline):
	
	def __init__(self, user_id, path_to_dataset, random_state=42):

		Pipeline.__init__(self, user_id, path_to_dataset, random_state)		

		self.log = Log()
		msg = self.__class__.__name__+'.'+utils.get_function_caller()+' -> enter'
		self.log.print(msg)


		self.user_id = user_id
		msg = 'user_id: ',self.user_id
		self.log.print(msg)

		self.path_to_dataset = path_to_dataset
		msg = 'path_to_dataset: ',self.path_to_dataset
		self.log.print(msg)

		self.random_state = random_state
		msg = 'random_state: ',self.random_state
		self.log.print(msg)		

		self.dataframe = pd.read_csv(self.path_to_dataset)

		self.prediction = None	

	def split_dataframe(self):

		msg = self.__class__.__name__+'.'+utils.get_function_caller()+' -> enter'
		self.log.print(msg)

		feature_names = [col for col in self.dataframe.columns if col!=self.target_column]	

		data = self.dataframe.copy()

		X = data[feature_names]
		y = data[self.target_column]
		
		msg = self.__class__.__name__+'.'+utils.get_function_caller()+' -> exit'
		self.log.print(msg)


		return X, y

	

	def decode_prediction(self, data):

		msg = self.__class__.__name__+'.'+utils.get_function_caller()+' -> enter'
		self.log.print(msg)		

		return super(Prediction, self).decode_target_feature(data)



	def predict(self):

		msg = self.__class__.__name__+'.'+utils.get_function_caller()+' -> enter'
		self.log.print(msg)

		super(Prediction, self).extract_features()

		super(Prediction, self).validate_column_type()

		super(Prediction, self).drop_this_first()

		self.X, self.y = self.split_dataframe()		

		self.X = super(Prediction, self).features_engineering(self.X)		

		self.X = super(Prediction, self).replace_infinite_numbers(self.X)		
				
		self.X, self.y = super(Prediction, self).handle_nan_values(self.X,self.y)		
		
		self.X = super(Prediction, self).drop_unnecessary_columns(self.X)		
				
		self.X = super(Prediction, self).encode_categorical_data(self.X)
		
		self.y = super(Prediction, self).encode_target_feature(self.y)			

		self.prediction = super(Prediction, self).predict(self.X)

		prediction_labels = self.decode_prediction(self.prediction)		

		msg = self.__class__.__name__+'.'+utils.get_function_caller()+' -> exit'
		self.log.print(msg)

		return self.prediction, prediction_labels

		
Example #46
feats = meta.index
costs = meta[config.META_COSTS]

data_trn[feats] = (data_trn[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
data_val[feats] = (data_val[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
data_tst[feats] = (data_tst[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
groups = meta[config.META_GROUPS].values if (config.META_GROUPS
                                             in meta.columns) else None

#==============================
print("Evaluating dataset:", config.dataset)

brain = Brain(None)
brain._load(file='model')

print("Performance on the last model:")

print("Performance on the best model:")
log_trn = Log(data_trn, hpc['train'], costs, brain, groups, "trn_best")
log_trn.log_perf(save_probs=True)

log_val = Log(data_val, hpc['validation'], costs, brain, groups, "val_best")
log_val.log_perf(save_probs=True)

log_tst = Log(data_tst, hpc['test'], costs, brain, groups, "tst_best")
# import pdb; pdb.set_trace()
log_tst.log_perf(histogram=True, save_probs=True)
Example #47
import sys

import numpy as np
import pandas as pd

from semantic import Semantic, error_manager

sys.path.append('../lexical')
sys.path.append('../utils')
sys.path.append('../grammar')
from lexical import Lex
from copy import deepcopy
from log import Log
from grammar import Gram, get_test_tokens
from error import *
from instruction import Instruction, InstructionManager

logger = Log("./logs/log.txt")
pd.set_option('display.max_columns', None)
# show all rows
pd.set_option('display.max_rows', None)
# set the display length of values to 100; the default is 50
pd.set_option('max_colwidth', 200)
pd.set_option('display.width', 5000)
from enum import Enum


def print_line(info):
    n = len(info)
    nstar = (100 - n) // 2
    extra_star = (100 - n) % 2
    print('*' * nstar, info, '*' * (nstar + extra_star))
Example #48
# -*- coding: utf-8 -*-
__time__ = '2018/3/2 10:19'
__author__ = '*****@*****.**'

import MySQLdb
from log import Log
import utils
import sys

logger = Log()
"""
MySQL database helpers
"""


class MysqlBase(object):
    def __init__(self, ipaddr, username, password, dbname, port):
        try:
            self.db = MySQLdb.connect(ipaddr, username, password, dbname, port)
            self.cursor = self.db.cursor()
        except:
            logger.exception("Can't connect to MySQL server on %s" % ipaddr)

    def __del__(self):
        self.cursor.close()
        self.db.close()

    # database query operation
    def query_data(self, sql):
        try:
            self.cursor.execute(sql)
Example #49
    def loadFromFile(self, filename):
        """
        Loads a graph from a JSON file.

        Parameters
        ----------
        filename : string with file path

        Raises
        ------
        ValueError
            When the graph is inconsistent as detected by the validateGraph() method.
        """
        Log.addLogMessage(Log.INFO, 'Opened graph ' + filename)
        self.filename = filename
        with open(filename, 'r') as f:
            jsonstr = f.read()

        jsondata = json.loads(jsonstr)

        # make sure the name becomes camelcase without spaces: required by CLaSH
        namestr = jsondata['name'].strip()
        namestr = string.capwords(namestr)
        namestr = namestr.replace(' ', '')
        self.name = namestr

        # Load the predefined clash types when available
        if 'clashtypes' in jsondata.keys():
            self.clashtypes = jsondata['clashtypes']

        # Load all nodes and their attributes
        for jsnode in jsondata['nodes']:
            nodeName = jsnode['name']
            nodeFunction = jsnode['function']
            nodeClashCode = ''
            if 'clashcode' in jsnode.keys():
                nodeClashCode = jsnode['clashcode']
            nodeColor = self.DEFAULT_NODE_COLOR
            if 'color' in jsnode.keys():
                nodeColor = jsnode['color']
            nodePosition = jsnode['pos'][0], jsnode['pos'][1]
            self.add_node(nodeName,
                          nodeFunction,
                          nodePosition,
                          clashcode=nodeClashCode,
                          color=nodeColor)

        # Load all edges and their attributes
        for jsedge in jsondata['edges']:
            edgeSource = jsedge['src']
            edgeDestination = jsedge['dst']
            edgeResNumber = jsedge['resnr']
            edgeArgNumber = jsedge['argnr']
            edgePRates = CSDFGraph._flattenRateList(jsedge.get('prates', [1]))
            edgeCRates = CSDFGraph._flattenRateList(jsedge.get('crates', [1]))
            edgeTokens = jsedge.get('tkns', [])
            edgeColor = self.DEFAULT_EDGE_COLOR
            if 'color' in jsedge.keys():
                edgeColor = jsedge['color']
            self.add_edge(edgeSource,
                          edgeDestination,
                          edgeResNumber,
                          edgeArgNumber,
                          edgePRates,
                          edgeCRates,
                          edgeTokens,
                          color=edgeColor)

        # Now that the graph is constructed, validate it:
        self.validateGraph()
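From the keys the loader reads, a minimal input file needs a graph name plus nodes carrying name, function and pos, and edges carrying src, dst, resnr and argnr; rates, tokens, colors, clashcode and clashtypes are optional. A sketch that writes such a file, with names, functions and positions invented for illustration:

import json

graph = {
    "name": "example graph",
    "nodes": [
        {"name": "producer", "function": "\\x -> x", "pos": [0, 0]},
        {"name": "consumer", "function": "\\x -> x + 1", "pos": [100, 0]},
    ],
    "edges": [
        {"src": "producer", "dst": "consumer", "resnr": 0, "argnr": 0,
         "prates": [1], "crates": [1], "tkns": []},
    ],
}

with open("example_graph.json", "w") as f:
    json.dump(graph, f, indent=4)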
Example #50
    def storeToFile(self, filename=''):
        """
        Stores the current graph in a JSON file.

        Parameters
        ----------
        filename : string with filepath
            filename is an optional argument containing the file in which graph is stored.
            When this argument is not used, the graph is stored in the file from  which it
            was initially read.
        """
        if filename == '':
            # no file name given so use file from which this graph is made
            fname = self.filename
        else:
            fname = filename

        # Put all info into a temporary dictionary which will be transformed into a json string
        graphDict = OrderedDict({})

        # First save graph properties/attributes: name and predefined CLaSH types
        graphDict['name'] = self.name
        if self.clashtypes is not None:
            graphDict['clashtypes'] = self.clashtypes

        # Store all the nodes of the graph in the temporary dictionary
        nodesList = []
        for nname in self.nodes():
            nodedict = OrderedDict({})
            nodedict['name'] = nname
            nodedict['function'] = self.node[nname]['funcstr']
            if self.node[nname]['clashcode'] != '':
                nodedict['clashcode'] = self.node[nname]['clashcode']
            nodedict['pos'] = list(self.node[nname]['pos'])
            nodedict['color'] = self.node[nname]['color']
            nodesList.append(nodedict)

        # add all nodes to temporary dict in form of a list
        graphDict['nodes'] = nodesList

        # Store all the edges of the graph in the temporary dictionary
        edgesList = []
        for srcname, dstname in self.edges():
            edgedict = OrderedDict({})
            edgedict['src'] = srcname
            edgedict['dst'] = dstname
            edgedict['resnr'] = self[srcname][dstname]['res']
            edgedict['argnr'] = self[srcname][dstname]['arg']
            edgedict['prates'] = self[srcname][dstname]['prates']
            edgedict['crates'] = self[srcname][dstname]['crates']
            edgedict['tkns'] = self[srcname][dstname]['tkns']
            edgedict['color'] = self[srcname][dstname]['color']
            edgesList.append(edgedict)

        # add all edges to temporary dict in form of a list
        graphDict['edges'] = edgesList

        # Last but not least, write the graph to the file
        with open(fname, 'w') as outfile:
            json.dump(graphDict, outfile, indent=4)
            Log.addLogMessage(Log.INFO, 'Saved graph ' + fname)
Example #51
class Server(threading.Thread):
    def __init__(self, queue, port, id):
        self.port = port
        self.id = id
        self.queue = queue
        self.title = constants.TITLE_FOLLOWER
        self.channel = network.Network(port, id)
        self.channel.start()
        self.leader = None
        self.running = True

        self.connected_servers = []

        self.last_heartbeat = 0
        self.heartbeat_timeout = 0
        self.process_heartbeat()
        self.heartbeat_frequency = 0.5
        self.election_start_time = 0
        self.election_timeout = 0  # Time to wait for heartbeat or voting for a candidate before calling election
        self.set_election_timeout()
        # Election variables
        self.id_received_votes = set()  # Id of servers who granted you votes
        self.id_refused_votes = set(
        )  # Id of servers who refused to vote for you
        self.num_received_votes = 0  # Number of votes received in current election

        # Persistent state variables
        # TODO: PERSIST; On server boot, retrieve information from disk
        self.current_term = 0  # Latest term server has seen
        self.voted_for = None  # CandidateId that received vote in current term
        self.log = Log()

        self.next_index = None  # For leader: indices for updating follower logs
        self.latest_index_term = None  # For leader: tuples of latest entry index and term for each follower. Used for commit

        self.load_state()
        threading.Thread.__init__(self)

    def set_election_timeout(self):
        self.election_timeout = 1.5 * random() + 1.5

    def process_heartbeat(self):
        self.last_heartbeat = time.time()
        self.heartbeat_timeout = 1.5 * random() + 1.5

    def request_votes(self):
        if not self.log.data:
            # Log is empty
            last_log_index = -1
            last_log_term = -1
        else:
            last_log_index = self.log.get(-1).index
            last_log_term = self.log.get(-1).term

        msg = RequestVoteMessage(self.id, self.current_term, last_log_index,
                                 last_log_term)
        for server in self.connected_servers:
            self.channel.send(msg, id=host_to_id[server[0]])
            # print "Requesting vote from server", host_to_id[server[0]]

        print "Vote requests sent to other servers"

    def request_remaining_votes(self, id_all_voters):
        if not self.log.data:
            # Log is empty
            last_log_index = -1
            last_log_term = -1
        else:
            last_log_index = self.log.get(-1).index
            last_log_term = self.log.get(-1).term

        msg = RequestVoteMessage(self.id, self.current_term, last_log_index,
                                 last_log_term)

        for server in self.connected_servers:
            server_id = host_to_id[server[0]]
            if server_id not in id_all_voters:
                self.channel.send(msg, id=server_id)
                # print "Requesting vote from server", host_to_id[server[0]]

            print "Vote requests sent to remaining servers who have not responded"

    def check_status(self):
        current_time = time.time()
        if self.title == constants.TITLE_LEADER:
            # Send AppendEntries to update follower logs
            for server in self.connected_servers:
                server_id = host_to_id[server[0]]
                next_index = self.next_index[server_id]

                # Send entries that the server has not received yet, if any
                if self.log.last_log_index() >= next_index:
                    entries = self.construct_entries_list(next_index)
                    if next_index == 0:
                        prev_log_index = -1
                        prev_log_term = -1
                    else:
                        prev_log_index = self.log.get(next_index - 1).index
                        prev_log_term = self.log.get(next_index - 1).term
                    msg = AppendEntriesMessage(self.current_term, self.id,
                                               prev_log_index, prev_log_term,
                                               entries,
                                               self.log.last_commit_index)

                    self.channel.send(msg, id=server_id)
                    print "AppendEntries sent to ", server_id

            if current_time - self.last_heartbeat >= self.heartbeat_frequency:
                self.send_heartbeats()
        elif self.title == constants.TITLE_FOLLOWER and current_time - self.last_heartbeat > self.heartbeat_timeout:
            # Heartbeat timeout passed as follower: Start election
            print "Election timeout as follower. No heartbeat. Become candidate and start new election"
            self.start_election()
        elif self.title == constants.TITLE_CANDIDATE and current_time - self.election_start_time > self.election_timeout:
            # Election timeout passed as candidate, without conclusion of election: Start new election
            print "Election timeout as candidate. Election has not yet led to new leader. Starting new election"
            self.set_election_timeout()
            self.start_election()
        elif self.title == constants.TITLE_CANDIDATE and current_time - self.election_start_time < self.election_timeout:
            # Election timeout has not passed as candidate
            print "As candidate, election timeout has not passed. Request votes from servers that have not responded"
            id_all_voters = self.id_received_votes.union(self.id_refused_votes)
            self.request_remaining_votes(id_all_voters)

    def construct_entries_list(self, index):
        entries = []
        for i in range(index, len(self.log)):
            entries.append(self.log.get(i))
        return entries

    def start_election(self):
        self.title = constants.TITLE_CANDIDATE
        self.reset_election_info()
        self.current_term += 1
        self.save_state()
        # TODO: Voted_for must persist
        self.voted_for = self.id
        self.save_state()
        self.update_votes(self.id, True)
        self.election_start_time = time.time()
        self.check_election_status()

        self.request_votes()

    def send_heartbeats(self):
        heartbeat = AppendEntriesMessage(self.current_term, self.id, -1, -1,
                                         [], self.log.last_commit_index)
        for server in self.connected_servers:
            self.channel.send(heartbeat, id=host_to_id[server[0]])
        self.process_heartbeat()

    def step_down(self):
        # Step down as leader or candidate, convert to follower
        # Reset various election variables
        if self.title == constants.TITLE_LEADER or self.title == constants.TITLE_CANDIDATE:
            self.title = constants.TITLE_FOLLOWER
            self.process_heartbeat()
            self.reset_election_info()

    def grant_vote(self, candidate_id):
        # Record and persist the vote before replying
        self.voted_for = candidate_id
        self.save_state()
        print "Grant vote to", candidate_id
        self.channel.send(VoteReplyMessage(self.id, self.current_term, True),
                          id=candidate_id)

    def refuse_vote(self, candidate_id):
        self.channel.send(VoteReplyMessage(self.id, self.current_term, False),
                          id=candidate_id)
        print "Refuse vote to", candidate_id

    def majority(self):
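        # connected_servers excludes this server, so +1 counts ourselves;
        # floor division plus one yields a strict majority of the cluster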
        return (len(self.connected_servers) + 1) / 2 + 1

    def check_election_status(self):
        if self.num_received_votes >= self.majority():
            # Become leader when granted majority of votes
            self.become_leader()

    def become_leader(self):
        self.title = constants.TITLE_LEADER
        self.leader = self.id
        print "Election won - I am now LEADER"
        # TODO: Implement rest of leader initialization
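        # Initialise next_index for every server to one past the leader's last log entry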
        self.next_index = [len(self.log) for _ in range(len(addr_to_id))]

        if self.log.last_commit_index == -1:
            latest_index = None
        else:
            latest_index = self.log.last_commit_index

        if latest_index is None:
            latest_term = 0
        elif self.log.contains_at_index(latest_index):
            latest_term = self.log.get(latest_index).term
        else:
            latest_term = 0

        self.latest_index_term = [(latest_index, latest_term)
                                  for _ in range(len(addr_to_id))]
        self.latest_index_term[self.id] = (len(self.log) - 1,
                                           self.current_term)
        self.reset_election_info()
        self.send_heartbeats()

    def reset_election_info(self):
        self.id_received_votes = set()
        self.id_refused_votes = set()
        self.voted_for = None
        self.num_received_votes = 0

    # server_id: server that sent vote reply; vote_granted: True if vote granted
    def update_votes(self, server_id, vote_granted):
        if vote_granted:
            print "Received vote from", server_id
            self.id_received_votes.add(server_id)
            self.num_received_votes = len(self.id_received_votes)
            print "Number of received votes is now", self.num_received_votes
        else:
            print "Denied vote from", server_id
            self.id_refused_votes.add(server_id)

    def update_commits(self):
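        # Lower a candidate commit index from the highest next_index until a majority
        # of servers report an index at or past it and an entry from the current term,
        # then advance last_commit_index and send any pending client acknowledgements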
        index = max(self.next_index)

        i_count = 0
        t_count = 0
        while i_count < self.majority() and index >= 0:
            # index >= 0 is guaranteed by the loop condition
            index -= 1
            t_count = 0
            i_count = 0
            for (i, t) in self.latest_index_term:
                if t == self.current_term:
                    t_count += 1
                if i >= index:
                    i_count += 1

        if t_count >= self.majority() and i_count >= self.majority():
            if self.log.last_commit_index < index:
                self.log.last_commit_index = index
                self.save_state()
            elif self.log.last_commit_index > index:
                print "Error: Update_commits: new commit index is lower than current commit_index"

            for entry in self.log.data:
                if not entry.client_ack_sent:
                    # TODO: Send client ack
                    ack_message = AcknowledgeMessage(ack=True,
                                                     msg_id=entry.msg_id)
                    self.channel.send(ack_message, id=entry.author)
                    entry.client_ack_sent = True

    def run(self):
        print "Server with id=", self.id, " up and running"
        while self.running:
            self.update_connected_servers()
            for server in list(addr_to_id.keys()):
                # if server not in self.connected_servers and not addr_to_id[server] == id:
                if server not in self.channel and not host_to_id[
                        server[0]] == self.id:
                    connected = self.channel.connect(server)
                    if connected:
                        print "Server: Connected to " + server[0]
                        if server not in self.connected_servers:
                            self.connected_servers.append(server)
                    # print "Connected: ", connected

                data = self.channel.receive(RECEIVE_FREQ)
                if data:
                    # print "There is data on channel"
                    for server_id, msg in data:
                        self.process_msg(server_id, msg)
                else:
                    self.check_status()

    def process_msg(self, sender_id, msg):

        #print "Processing message from", sender_id, "of type", msg.type
        if msg.type == constants.MESSAGE_TYPE_REQUEST_VOTE:
            self.process_request_vote(sender_id, msg)

        elif msg.type == constants.MESSAGE_TYPE_VOTE_REPLY:
            self.process_vote_reply(sender_id, msg)

        elif msg.type == constants.MESSAGE_TYPE_REQUEST_LEADER:
            msg = messages.RequestLeaderMessage(leader=self.leader)
            self.channel.send(msg, id=sender_id)

        elif msg.type == constants.MESSAGE_TYPE_LOOKUP:
            self.process_lookup(sender_id, msg)

        elif msg.type == constants.MESSAGE_TYPE_POST:
            self.process_post(sender_id, msg)

        elif msg.type == constants.MESSAGE_TYPE_APPEND_ENTRIES:
            self.process_append_entries(sender_id, msg)

        elif msg.type == constants.MESSAGE_TYPE_ACKNOWLEDGE:
            self.process_acknowledge(sender_id, msg)

        # Used for testing purposes
        elif msg.type == constants.MESSAGE_TYPE_TEXT:
            print "From", msg.sender_id, ":", msg.msg

        else:
            print "Error: Invalid message type"

    def process_lookup(self, sender_id, msg):
        if self.title == constants.TITLE_LEADER or msg.override:
            print "-----> Processing Lookup from client"
            posts = self.log.get_committed_entries()
            msg = messages.LookupMessage(msg_id=msg.msg_id,
                                         post=posts,
                                         server_id=self.id)
            self.channel.send(msg=msg, id=sender_id)
        else:
            print "Lookup to leader"
            msg = messages.RequestLeaderMessage(leader=self.leader)
            self.channel.send(msg=msg, id=sender_id)

    def process_post(self, sender_id, msg):
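        # Only the leader appends client posts to its log; other servers redirect
        # the client to the current leader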
        if self.title == constants.TITLE_LEADER:
            # TODO: Implement adding entry
            # TODO: PERSIST; implement in log class?
            entry = Entry(msg.post,
                          sender_id,
                          self.current_term,
                          len(self.log),
                          msg_id=msg.msg_id)

            if self.log.append(entry):
                self.save_state()
                self.latest_index_term[self.id] = (len(self.log) - 1,
                                                   self.current_term)
                print "---->Append entry from client to log"

        else:
            msg = messages.RequestLeaderMessage(leader=self.leader)
            self.channel.send(msg=msg, id=sender_id)

    def process_request_vote(self, sender_id, msg):
        if not self.log:
            # Log is empty
            last_log_index = -1
            last_log_term = -1
        else:
            last_log_index = self.log.get(-1).index
            last_log_term = self.log.get(-1).term

        # Handle message
        if msg.term < self.current_term:
            # If candidate's term is less than my term then refuse vote
            print "Refuse vote to server", sender_id, "because I have higher term"
            self.refuse_vote(msg.candidate_id)
            return

        if msg.term > self.current_term:
            # If candidate's term is greater than my term then update current_term (latest term I've encountered),
            # Step down if leader or candidate
            self.current_term = msg.term
            self.save_state()
            # TODO: Step down if leader or candidate
            self.step_down()

        if msg.term >= self.current_term:
            # If candidate's term is at least as new as mine, I have not granted
            # anyone else a vote, and candidate's log is at least as complete as
            # mine, then grant vote
            if self.voted_for is None or self.voted_for == msg.candidate_id:
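                # Up-to-date check: the candidate's last log term must be newer,
                # or equal with an index at least as large as ours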
                if last_log_term < msg.last_log_term or (
                        last_log_term == msg.last_log_term
                        and last_log_index <= msg.last_log_index):
                    self.grant_vote(msg.candidate_id)
        else:
            # print "Cand term, current_term:", msg.term, self.current_term
            # print "Voted for:", self.voted_for
            # print "Cand log term, last_log_term", msg.last_log_term, last_log_term
            # print "Cand log index, last_log_index", msg.last_log_index, last_log_index
            self.refuse_vote(msg.candidate_id)

    def process_vote_reply(self, sender_id, msg):
        if msg.term > self.current_term and not msg.vote_granted:
            # Step down if reply from someone with higher term
            # Extra condition for security.
            # If responder's term is higher, then vote should not be granted with correct execution
            self.current_term = msg.term
            self.save_state()
            print "Denied vote from", msg.follower_id
            self.step_down()
        else:
            # Take care of grant or refusal of vote
            self.update_votes(msg.follower_id, msg.vote_granted)
            self.check_election_status()

    def process_acknowledge(self, sender_id, msg):
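        # A positive ACK advances the follower's next_index and latest (index, term);
        # a negative ACK backs next_index off by one so earlier entries get retried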
        if msg.ack:
            print "Process Acknowledge from server. ACK == TRUE"
            self.next_index[sender_id] = msg.next_index
            self.latest_index_term[sender_id] = msg.latest_index_term
            self.update_commits()
        else:
            print "Process Acknowledge from server. ACK == FALSE"
            if self.next_index[sender_id] - 1 < 0:
                self.next_index[sender_id] = 0
            else:
                self.next_index[sender_id] -= 1
            if msg.term > self.current_term:
                self.current_term = msg.term
                self.save_state()
                self.step_down()

    def process_append_entries(self, sender_id, msg):
        if len(msg.entries) == 0:
            self.process_heartbeat()

            if msg.commit_index < len(self.log):
                self.log.last_commit_index = msg.commit_index
                self.save_state()

            self.leader = sender_id
            #print "Heartbeat received from server", sender_id

            if self.title == constants.TITLE_CANDIDATE or self.title == constants.TITLE_LEADER:
                # A "leader" that receives a heartbeat has most likely been
                # superseded after a crash/rejoin, so it steps down as well
                self.step_down()
        else:
            # TODO: Process AppendEntriesMessage
            print "-->Processing AppendEntriesMessage from leader"
            self.process_heartbeat()
            if msg.term > self.current_term:
                self.current_term = msg.term
                self.save_state()

            if self.title == constants.TITLE_CANDIDATE or self.title == constants.TITLE_LEADER:
                self.step_down()

            # Reject if my term is greater than leader term
            if self.current_term > msg.term:
                print "Error: Current term greater than leaders term"
                self.channel.send(AcknowledgeMessage(ack=False,
                                                     term=self.current_term),
                                  id=sender_id)

            # Accept. Self.log is empty and leader is sending all entries
            elif self.log.is_empty() and msg.prev_log_index == -1:
                print "Appending entries"
                # First entry to append is at index 0
                if self.log.append_entries(msg.entries):
                    self.log.last_commit_index = msg.commit_index
                    self.save_state()
                    i = self.log.last_log_index()
                    t = self.log.get(i).term
                    self.channel.send(AcknowledgeMessage(
                        ack=True,
                        next_index=len(self.log),
                        latest_index_term=(i, t)),
                                      id=sender_id)
                    print "Log after appending entries:"
                    self.log.show_data()
                else:
                    print "DET HER SKAL IKKE SKJE 1"

            # Accept. Check if self.log has an element at msg.prev_log_index
            elif self.log.contains_at_index(msg.prev_log_index):
                # Check if the term corresponds with msg.prev_log_term
                if self.log.get(msg.prev_log_index).term == msg.prev_log_term:
                    if self.log.append_entries(msg.entries):
                        self.log.last_commit_index = msg.commit_index
                        self.save_state()
                        i = self.log.last_log_index()
                        t = self.log.get(i).term
                        self.channel.send(AcknowledgeMessage(
                            ack=True,
                            next_index=len(self.log),
                            latest_index_term=(i, t)),
                                          id=sender_id)
                        print "Log after appending entries:"
                        self.log.show_data()
                    else:
                        print "DET HER SKAL IKKE SKJE NUMMER 2"
                else:
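                    # Term mismatch at prev_log_index: drop the conflicting entry and
                    # reply with a negative ACK so the leader retries from an earlier index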
                    self.log.remove(msg.prev_log_index)
                    self.channel.send(AcknowledgeMessage(ack=False),
                                      id=sender_id)
            else:
                print "Send ACK-False"
                self.channel.send(AcknowledgeMessage(ack=False), id=sender_id)

    def save_state(self):
        storage.save(self.id, self.voted_for, self.current_term, self.log)

    def load_state(self):
        self.voted_for, self.current_term, self.log = storage.load(self.id)
        # print "voted for", self.voted_for
        print self.current_term
        print self.log

    def update_connected_servers(self):
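        # Mirror the channel's live connections: add newly connected addresses,
        # drop addresses that have disappeared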
        for addr in list(addr_to_id.keys()):
            if addr in self.channel.address_to_connection.keys(
            ) and addr not in self.connected_servers:
                self.connected_servers.append(addr)

            if addr not in self.channel.address_to_connection.keys(
            ) and addr in self.connected_servers:
                self.connected_servers.remove(addr)
Пример #52
0
import requests
import hashlib
import json
from util import readConfig
from log import Log
ReadConfig = readConfig.ReadConfig()
log = Log()

host = ReadConfig.get_config("HTTP", "host")
mobile = ReadConfig.get_config("DATABASE", "mobile")
buyer_id = ReadConfig.get_config("DATABASE", "buyer_id")
shop_id = ReadConfig.get_config("DATABASE", "shop_id")

session = requests.Session()


def buyerLogin():
    # host='http://www.raincard.cn'
    url = '/api/v1/buyer/auth/sign/create'
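    # Sign the request: uppercase MD5 of the parameter string with the secret key appended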
    sign_str = "buyer_id=" + buyer_id + "&shop_id=" + shop_id + "Kbg6OdLm36vVpdn0Pp4x5OVB6SDovUmh"
    # sign_str = "mobile=" + mobile + "&shop_id=" + shop_id + "&Kbg6OdLm36vVpdn0Pp4x5OVB6SDovUmh"
    sign = hashlib.md5(sign_str.encode('utf-8')).hexdigest().upper()
    # print(sign)
    params = {
        # "mobile":mobile,
        "buyer_id": buyer_id,
        "shop_id": shop_id,
        "sign": sign
    }
    resp = session.post(host + url, params)
    print(resp.url)
Пример #53
0
    def start(self):
        Log.info("Server.start()")

        self.webServer.start()
Пример #54
0
    def stop(self):
        Log.info("Server.stop()")

        self.webServer.stop()
Пример #55
0
import yaml
import struct

from log import Log
from imageutil import ImageUtil
from font import Font
from rectangle import Rectangle
from palette import Palette
from line import Line
from window import Window
from bitmap import Bitmap
from glyph import Glyph
from modifier import *
from osdobject import OSDObjectType, OSDObject

logger = Log.get_logger("engine")


class Scene(object):
    _BASE_DIR = None

    def __init__(self, yaml_file=None):
        self._yaml_file = None
        self._windows = []
        self._ingredients = {}
        self._palettes = {}
        self._modifiers = []
        self._width = -1
        self._height = -1
        self._frames = 1
        self._ticks = 20
Пример #56
0
    def __init__(self):
        Log.info("Server.init()")

        self.webServer = WebServer()
Пример #57
0
# coding: utf-8

import json

from .base import Base
from log import Log
from setting import RecordsStatus, MAX_SWIPE_DOWN_COUNT
from exception import ValidationError

logger = Log.logger(__file__)


class TARGETModel(Base):
    def __init__(self, driver):
        super().__init__(driver)
        self.target_list = []
        self.init()

    def exe(self):
        logger.info("-" * 30 + " step 1 " + "-" * 30)
        self.go_to_target()
        logger.info("-" * 30 + " step 2 " + "-" * 30)
        self.update_exe_on_target()
        logger.info("-" * 30 + " step 3 " + "-" * 30)
        self.click()
        logger.info("-" * 30 + " step 4 " + "-" * 30)
        self.update_app_status()

    def init(self):
        for i in self.rule.TARGET_SETTING:
            steps = i["STEPS"]
Пример #58
0
class CheckHandler(object):
    CheckHandlerLog = Log('CheckHandler')

    def loadCacheActiveTasks(self):
        try:
            cache = load_obj('activeTickets')

            for i in cache:
                activeTickets[i] = cache[i]

            self.CheckHandlerLog.info("[Cache] activeTickets загружен.")
        except Exception as exc:
            pass

    def loadCacheActiveReply(self):
        try:
            cache = load_obj('activeRepTickets')

            for i in cache:
                activeRepTickets[i] = cache[i]

            self.CheckHandlerLog.info("[Cache] activeRepTickets загружен.")
        except Exception as exc:
            pass

    def managerParse(self, ticket):
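        # Tariff-change tickets: extract the domain and target package from the ticket
        # text, then switch the account's cPanel package via the 'changepackage' API call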

        if (re.match(u'Смена\s{1,}(ТП)?(тарифного)?', ticket.subject)):
            try:
                _domain = (re.search(
                    '\s[a-zA-Z]{1,15}((-|\.)[a-zA-Z]{1,10})?((-|\.)[a-zA-Z]{1,10})?\.([a-zA-Z]{1,6})(\.|\s)',
                    ticket.message).group(0)
                           ).strip().lower().encode('idna').decode('utf-8')
                _package = re.search('на (xS|S|M|L|XXL|MAX)',
                                     ticket.message).group(0).split()[1]

                cpanelUsersAccounts = getAccountsList()
                hosting = cpanelUsersAccounts[_domain].server
                username = cpanelUsersAccounts[_domain].username

                answer = cpanelApiClient[hosting].call(
                    'changepackage', user=username, pkg=_package)['result'][0]
                status = int(answer['status'])
                message = answer['statusmsg']

                if (status == 1):
                    self.CheckHandlerLog.info(
                        "[managerParse][%s][%s] смена тарифного плана. " %
                        (ticket.ticket_id, _domain))
                    self.openbot.sendMessageMe(
                        "[managerParse][%s][%s] смена тарифного плана. " %
                        (ticket.ticket_id, _domain))
                    hdapi.postQuickReply(
                        ticket.ticket_id,
                        "[OpenContactBot] Тарифный план изменен на %s для домена: %s "
                        % (_package, _domain), HdTicketStatus.CLOSED,
                        self.openbot)
                    return True
                else:
                    self.CheckHandlerLog.critical(
                        "[managerParse][%s][%s] %s." %
                        (ticket.ticket_id, _domain, ticket.message))
                    self.openbot.sendMessageMe(
                        "[managerParse][%s][%s] %s. " %
                        (ticket.ticket_id, _domain, ticket.message))
            except Exception as inst:
                self.CheckHandlerLog.critical("[managerParse] %s" % type(inst))
                self.CheckHandlerLog.critical("[managerParse] %s" % inst.args)
        return False

    def parseDomainbyTask(self, ticket):
        if re.match(u'Ошибки при автоматическом запуске хостинга',
                    ticket.subject):
            try:
                if ('Время ожидания операции истекло' in ticket.message):
                    self.CheckHandlerLog.info("[Таймаут][{0}] Закрыт".format(
                        ticket.ticket_id))
                    self.openbot.sendMessageMe("[Таймаут][{0}] Закрыт".format(
                        ticket.ticket_id))
                elif ('Данного хостинга нету на сервере' in ticket.message):
                    self.CheckHandlerLog.info(
                        "[Таймаут][{0}] Хостинг отсутствует на сервере.".
                        format(ticket.ticket_id))
                    self.openbot.sendMessageMe(
                        "[Таймаут][{0}] Хостинг отсутствует на сервере.".
                        format(ticket.ticket_id))

                Datebase().setTicketClose(ticket.ticket_id)
                return True

            except Exception as inst:
                self.CheckHandlerLog.critical(
                    "[parseDomainbyTask][запуск хостинга] %s" % (inst))
                self.CheckHandlerLog.critical(sys.exc_info()[0])
                return False

        if re.match(
                u'Изменение тарифного плана виртуального хостинга для домена',
                ticket.subject
        ) or (re.search(
                u'\<td\>В ДМС изменен тарифный план виртуального хостинга для домена',
                ticket.message) is not None):
            try:
                domain = re.search(
                    u'Изменение тарифного плана виртуального хостинга для домена (.+?)</td>',
                    ticket.message).group(1).strip().lower().encode(
                        'idna').decode('utf-8')
                #prevPackage = re.search(u'с плана \"(.+?)" на план',ticket.message).group(1)
                afterPackage = re.search(u'на план \"(.+?)"\.<br',
                                         ticket.message).group(1)

                cpanelUsersAccounts = getAccountsList()
                hosting = cpanelUsersAccounts[domain].server
                username = cpanelUsersAccounts[domain].username

                answer = cpanelApiClient[hosting].call(
                    'changepackage', user=username,
                    pkg=afterPackage)['result'][0]
                status = int(answer['status'])
                message = answer['statusmsg']
                #self.CheckHandlerLog.info("[Package][%s] Сообщение: %s" %(domain , message))

                if (status == 1):
                    self.CheckHandlerLog.info(
                        "[Package][%s][%s] смена тарифного плана. " %
                        (ticket.ticket_id,
                         domain.encode('idna').decode('idna')))
                    self.openbot.sendMessageMe(
                        "[Package][%s][%s] смена тарифного плана. " %
                        (ticket.ticket_id,
                         domain.encode('idna').decode('idna')))
                    Datebase().setTicketClose(ticket.ticket_id)
                else:
                    self.CheckHandlerLog.critical(
                        "[Package][%s][%s] %s." %
                        (ticket.ticket_id,
                         domain.encode('idna').decode('idna'), ticket.message))
                    self.openbot.sendMessageMe(
                        "[Package][%s][%s] %s. " %
                        (ticket.ticket_id,
                         domain.encode('idna').decode('idna'), ticket.message))
                return True
            except Exception as inst:
                self.CheckHandlerLog.critical("[Package] %s" % (inst))
                self.CheckHandlerLog.critical(sys.exc_info()[0])
                return False
        else:
            self.CheckHandlerLog.critical(
                "[parseDomainbyTask][%s] Заявка не классифицирована." %
                (ticket.ticket_id))
            self.openbot.sendMessageMe(
                "[parseDomainbyTask][%s] Заявка не классифицирована. " %
                (ticket.ticket_id))
            return False

    def getListTickets(self):
        try:
            tickets = []
            results = Datebase().getNewTicketsRows()

            for row in results:
                tickets.append(
                    Ticket(row[0], row[1], row[2], row[3], row[4], row[5],
                           row[6], row[7], row[8], row[9], row[10],
                           self.getTicketAttachments(row[0])))

            return tickets
        except Exception as inst:
            self.CheckHandlerLog.critical("[getListTickets] %s" % (inst))
            self.CheckHandlerLog.critical(sys.exc_info()[0])

    def getTicketAttachments(self, ticket_id):
        attachments = {}
        results = Datebase().getNewTicketAttachments(ticket_id)

        for row in results:
            attachments[row[1]] = (
                "http://hd.ok.by/admin/ticket_attachments.php?ticket_id=%s&att_id=%s"
                % (ticket_id, row[0]))

        return attachments

    def undefinedTicket(self, ticket):
        if (ticket.ticket_id not in activeTickets):
            activeTickets[ticket.ticket_id] = ticket
            ticket.message = self.cleanUpMessage(ticket.message)

            #append attachments to message
            for k, v in ticket.attachment.items():
                ticket.message += "\n<a href=\"%s\">%s</a>" % (v, k)

            self.CheckHandlerLog.info(
                "[Ticket][%s] Новая Заявка.\n %s \n %s \n %s" %
                (ticket.ticket_id, ticket.email, ticket.subject,
                 ticket.message))
            self.openbot.sendMessageGroup(
                "[Ticket][%s] Новая Заявка.\n %s \n %s \n %s" %
                (ticket.ticket_id, ticket.email, ticket.subject,
                 ticket.message), 'HTML', False)
            save_obj(activeTickets, 'activeTickets')

    def cleanUpMessage(self, message):
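        # Strip HTML tags and quoted-reply noise (client date/time headers, '>'-quoted
        # text, separator rules and runs of blank lines) from a ticket message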

        message = re.sub(r'<br>|</p>', '\n', message)
        message = re.sub("<br />|</div>", ' ', message)
        message = re.sub("<.*?>", "", message)

        reg1 = re.compile(
            r"^[a-zA-Zа-яА-ЯёЁ]+\,\s\d{2,2}\s\D+\s\d{4}\s\D\.\,\s\d{2,2}:\d{2,2}\s.*$",
            re.M)
        reg2 = re.compile(r"^\-{2,}\D\-{2,}.*$", re.M)
        reg3 = re.compile(
            r"^[\d]{2,2}.[\d]{2,2}.[\d]{4,4}\s[\d]{2,2}:[\d]{2,2},\s\D.*$",
            re.M)
        reg4 = re.compile(
            r"^[\d]{2,2}:[\d]{2,2},\s[\d]{2,2}\s\D+\d{4,4}\s\D.,\s\D.*$", re.M)
        reg5 = re.compile(r"^\d{1,}\D+\d{4,4}\s\D\.\,\s\d{1,}:\d{1,}\s\D.*$",
                          re.M)
        reg6 = re.compile(
            r"^[a-zA-Zа-яА-ЯёЁ]+\d{2,2}.\d{2,2}.\d{4,4}\s\d{2,2}:\d{2,2},\s\D.*$",
            re.M)
        reg7 = re.compile(
            r"^[A-Za-zА-Яа-яеЁ]+,\s[\d]{1,}\s[A-Za-zА-Яа-яеЁ]+\s\d{4}\s[г]\.\,\s.*$",
            re.M)
        reg8 = re.compile(
            r"^[A-Za-zА-Яа-яеЁ]+\s[\d]{2,2}.[\d]{2,2}.[\d]{4,4}\s[\d]{2,2}:[\d]{2,2},\s[A-Za-zА-Яа-яеЁ]+.*$",
            re.M)
        reg9 = re.compile(
            r"^[\d]{1,}\s[A-Za-zА-Яа-яеЁ]+\.\s[\d]{4,4}\s[\D].\s[\d]{1,}:[\d]{1,}\s[A-Za-zА-Яа-яеЁ]+.*$",
            re.M)
        reg10 = re.compile(r"^>[\D\d].*$", re.M)
        reg11 = re.compile(r"\n{1,}")
        reg12 = re.compile(r"^\-{2,}.*$", re.M)
        reg13 = re.compile(r"^\_{2,}.*$", re.M)

        if reg10.findall(message):
            message = reg10.sub('\n', message).strip('\n')

        reglist = [
            reg1.findall(message),
            reg2.findall(message),
            reg3.findall(message),
            reg4.findall(message),
            reg5.findall(message),
            reg6.findall(message),
            reg7.findall(message),
            reg8.findall(message),
            reg9.findall(message)
        ]
        for each in reglist:
            if (len(each) > 0):
                message = ''.join(message.split((''.join(each[0])), 1)[:-1])

        if reg12.findall(message):
            message = reg12.sub('\n', message).strip('\n')

        if reg13.findall(message):
            message = reg13.sub('\n', message).strip('\n')

        if reg11.findall(message):
            message = reg11.sub('\n', message).strip('\n')

        return message

    def checkNewReplies(self):
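        # Diff the cached replied-ticket set against the database: replies that vanished
        # are reported as closed, genuinely new replies are announced and cached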
        replied_tickets = Datebase().getRepliesTicketsIdList()

        if (len(activeRepTickets) != 0):
            try:
                closedTickets = {
                    k: activeRepTickets[k]
                    for k in activeRepTickets.keys() ^ set(replied_tickets)
                }

                for rTicket in closedTickets:
                    self.CheckHandlerLog.info("[Ответ][%s] закрыт." % rTicket)
                    self.openbot.sendMessageGroup("[Ответ][%s] закрыт." %
                                                  rTicket)
            except KeyError:
                pass

            diff_ticket = {
                k: activeRepTickets[k]
                for k in activeRepTickets.keys() & set(replied_tickets)
            }

            activeRepTickets.clear()

            for i in diff_ticket:
                activeRepTickets[i] = diff_ticket[i]

            save_obj(activeRepTickets, 'activeRepTickets')

        for rTicket in replied_tickets:
            if rTicket not in activeRepTickets:
                time.sleep(0.5)

                for row in Datebase().getLastRepliesByTicketId(rTicket):
                    ticket = Ticket(row[0], row[1], row[2], row[3], row[4],
                                    row[5], row[6], row[7], row[8], row[9],
                                    row[10])

                    ticket.message = self.cleanUpMessage(ticket.message)

                    activeRepTickets[ticket.ticket_id] = ticket
                    save_obj(activeRepTickets, 'activeRepTickets')

                    self.CheckHandlerLog.info(
                        "[Reply][%s] Новый ответ.\n %s \n %s \n %s" %
                        (ticket.ticket_id, ticket.email, ticket.subject,
                         ticket.message))
                    self.openbot.sendMessageGroup(
                        "[Reply][%s] Новый ответ.\n %s \n %s \n %s" %
                        (ticket.ticket_id, ticket.email, ticket.subject,
                         ticket.message), 'HTML', False)

    def checkNewMessage(self):
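        # Compare open tickets against the cache: report disappeared tickets as closed,
        # then auto-close or route known notification types and announce the rest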
        tickets = self.getListTickets()
        emailSpamList = Datebase().getSpamEmail().split('\r\n')

        try:
            closedTickets = {
                k: activeTickets[k]
                for k in activeTickets.keys() ^ set(ticket.ticket_id
                                                    for ticket in tickets)
            }

            for cTicket in closedTickets:
                self.CheckHandlerLog.info("[%s] закрыт." % cTicket)
                self.openbot.sendMessageGroup("[%s] закрыт." % cTicket)
        except KeyError:
            pass

        tempactiveTickets = {
            k: activeTickets[k]
            for k in activeTickets.keys() & set(ticket.ticket_id
                                                for ticket in tickets)
        }

        activeTickets.clear()

        for i in tempactiveTickets:
            activeTickets[i] = tempactiveTickets[i]

        save_obj(activeTickets, 'activeTickets')

        if not tickets:
            return

        for ticket in tickets:
            time.sleep(0.5)
            if (ticket.ticket_id in activeTickets):
                continue
            if re.match("\[s.\.open.by\] New account: \w{1,16}",
                        ticket.subject):
                self.CheckHandlerLog.info("[Создание][%s] Закрыт" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[Создание][%s] Закрыт" %
                                           ticket.ticket_id)
                Datebase().setTicketClose(ticket.ticket_id)
                continue
            if re.match("\[s.\.open.by\] Account Suspended: \w{1,16}",
                        ticket.subject):
                self.CheckHandlerLog.info("[Блокировка][%s] Закрыт" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[Блокировка][%s] Закрыт" %
                                           ticket.ticket_id)
                Datebase().setTicketClose(ticket.ticket_id)
                continue
            if re.match("\[s.\.open.by\] Account Unsuspended: \w{1,16}",
                        ticket.subject):
                self.CheckHandlerLog.info("[Pазблокировка][%s] Закрыт" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[Pазблокировка][%s] Закрыт" %
                                           ticket.ticket_id)
                Datebase().setTicketClose(ticket.ticket_id)
                continue
            if re.match("\[s.\.open.by\] Upgrade/Downgrade:", ticket.subject):
                self.CheckHandlerLog.info("[Package][%s] Закрыт" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[Package][%s] Закрыт" %
                                           ticket.ticket_id)
                Datebase().setTicketClose(ticket.ticket_id)
                continue
            if re.match("Undelivered Mail Returned to Sender", ticket.subject):
                continue
            if re.match("\[s.\.open.by\] Disk Usage Warning: The user",
                        ticket.subject):
                try:
                    account = re.search(
                        'Disk quota notification for \“(.+?)\”\.',
                        ticket.message).group(1)
                    quota = re.search(
                        'The account currently uses (.+?) of its',
                        ticket.message).group(1)
                    self.CheckHandlerLog.info(
                        "[Квота][%s] [%s] %s" %
                        (ticket.ticket_id, account, quota))
                    self.openbot.sendMessageMe(
                        "[Квота][%s] [%s] %s" %
                        (ticket.ticket_id, account, quota))
                    Datebase().setTicketClose(ticket.ticket_id)
                except Exception as inst:
                    self.CheckHandlerLog.critical("[DiskUsageWarning] %s" %
                                                  (inst))
                    self.CheckHandlerLog.critical(sys.exc_info()[0])
                continue
            if re.match(
                    u"\<\!\-\- head not allowed \-\->Домен\: \w{1,25}(-)?(\.)?(\w{1,25})?(\.)?(\w{1,25})?(-)?(\.)?\w{1,5}(-)?(\.)?(\w{1,5})?\; Сервер\: http(s)?\:\/\/s\d\.open\.by\:2087\/json\-api\/\; Действие: Успешно заблокирован",
                    ticket.message):
                self.CheckHandlerLog.info("[API block][%s] Закрыт" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[API block][%s] Закрыт" %
                                           ticket.ticket_id)
                Datebase().setTicketClose(ticket.ticket_id)
                continue
            if re.match(
                    u"\<\!\-\- head not allowed \-\->Домен\: \w{1,25}(-)?(\.)?(\w{1,25})?(\.)?(\w{1,25})?(-)?(\.)?\w{1,5}(-)?(\.)?(\w{1,5})?\; Сервер\: http(s)?\:\/\/s\d\.open\.by\:2087\/json\-api\/\; Действие: Успешно разблокирован",
                    ticket.message):
                self.CheckHandlerLog.info("[API unblock][%s] Закрыт" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[API unblock][%s] Закрыт" %
                                           ticket.ticket_id)
                Datebase().setTicketClose(ticket.ticket_id)
                continue
            if re.match(u"\[s.\.open.by\] Account Terminated:",
                        ticket.subject):
                self.CheckHandlerLog.info("[Удаление][%s] Закрыт" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[Удаление][%s] Закрыт" %
                                           ticket.ticket_id)
                Datebase().setTicketClose(ticket.ticket_id)
                continue
            if re.match(u"Сведения ИРЦ РУП Белтелеком за", ticket.subject):
                self.CheckHandlerLog.info("[Белтелеком][%s] Задержан" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[Белтелеком][%s] Задержан" %
                                           ticket.ticket_id)
                Datebase().setTickethold(ticket.ticket_id)
                continue
            if (len(ticket.attachment) > 0):
                listKeys = list(ticket.attachment.keys())

                if any(".jar" in s for s in listKeys):
                    self.CheckHandlerLog.info("[SPAM][%s] Заблокирован" %
                                              ticket.ticket_id)
                    self.openbot.sendMessageMe("[SPAM][%s] Заблокирован" %
                                               ticket.ticket_id)
                    Datebase().setSpamEmail(ticket.email)
                    Datebase().setTicketSpam(ticket.ticket_id)
                    continue

            if (ticket.client_id == 101373):
                if (self.managerParse(ticket)):
                    continue
            if (ticket.client_id == 94434):
                if (self.parseDomainbyTask(ticket)):
                    continue
            if ticket.email in emailSpamList:
                self.CheckHandlerLog.info("[Spam][%s] Перемещен" %
                                          ticket.ticket_id)
                self.openbot.sendMessageMe("[Spam][%s] Перемещен" %
                                           ticket.ticket_id)
                Datebase().setTicketSpam(ticket.ticket_id)
                continue
            else:
                self.undefinedTicket(ticket)

    def start(self, openbot):
        time.sleep(1)

        self.CheckHandlerLog.info('CheckHandler started.')
        self.openbot = openbot

        loadDataFromServers()
        self.loadCacheActiveTasks()
        self.loadCacheActiveReply()

        while 1:
            try:
                self.checkNewMessage()
                self.checkNewReplies()
                time.sleep(30)
            except Exception as exc:
                self.CheckHandlerLog.critical("[MAINLOOP] %s" % exc)
                self.openbot.sendMessageMe("[MAINLOOP] %s" % exc)
Пример #59
0
    def interrupt(self):
        """
		Interrupt writing.
		"""
        Log.debug(self._tag, "Interrupt triggered")
        self._is_writing_interrupted = True
Пример #60
0
import math
import os
import random
from abc import ABC, abstractmethod
from typing import Dict, List

import yaml

from algo.env_exec_details import EnvExecDetails
from log import Log
from param import Param
# Note: Log is not instantiated in an __init__ here because this object is
# deep-copied during search, and deepcopy fails when a logger instance is attached
from utilities import CONTINUAL_LEARNING_RANGE_MULTIPLIER, HOME

logger = Log("EnvVariables")


def load_env_params(algo_name=None,
                    env_name=None,
                    param_name=None,
                    model_suffix=None):
    # Load parameters from yaml file
    abs_params_dir = os.path.abspath(HOME + "/env_params")
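    # Parameter files live at env_params/<env_name>/<algo_name>.yml, or
    # <algo_name>_<model_suffix>.yml when a suffix is given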
    filename = (abs_params_dir + "/{}/{}.yml".format(env_name, algo_name)
                if not model_suffix else abs_params_dir +
                "/{}/{}_{}.yml".format(env_name, algo_name, model_suffix))
    with open(filename, "r") as f:
        params_dict = yaml.safe_load(f)
        if param_name in list(params_dict.keys()):
            return params_dict[param_name]