Example #1
File: curator.py Project: Tontonis/muons
class DataCurator(object):
    """
    This program runs periodically and sends data to the central server.
    """
    def __init__(self):
        self.log = Logger(__name__).setup()

    def run(self):
        self.log.info("Running data curator")

        schedule.every().day.at("20:55").do(self.curate)

        while True:
            schedule.run_pending()
            time.sleep(1)

    def curate(self):
        self.log.info("Curation job running...")
Example #2
    def _run(self):
        """ Main run loop. """
        NO_WORK_DELAY = 5  # FIXME move this to a settings file

        self.running = True
        while self.running:

            has_work = True
            while has_work:

                distro = self.get_next_distribution()
                if distro:
                    distro.start()
                else:
                    has_work = False

            Logger.log("No work to do.  Sleeping for {}s.".format(
                NO_WORK_DELAY
            ))
            gevent.sleep(NO_WORK_DELAY)
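A stripped-down, runnable sketch of the same drain-then-sleep worker loop, assuming gevent is installed; a plain list of callables stands in for get_next_distribution():

import gevent

NO_WORK_DELAY = 5  # seconds to sleep when there is no work


class Worker(object):
    def __init__(self, jobs):
        self.jobs = jobs  # list of callables standing in for distributions
        self.running = False

    def run(self):
        self.running = True
        while self.running:
            # drain all available work before going idle
            while self.jobs:
                job = self.jobs.pop(0)
                job()
            # yield to other greenlets while idle
            gevent.sleep(NO_WORK_DELAY)

    def stop(self):
        self.running = False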
Example #3
class MuonDetector(gps.GPSListener, adc.EventListener, object):
    """
    Main entry point into the muon detector program.
    """

    def __init__(self):
        self.log = Logger(__name__).setup()

        # Read our node ID
        self.node_id = eeprom.get_id()

        # Load the GPS module
        self.gps_module = gps.GPS(self)

        # Load the ADC module
        self.adc_module = adc.ADC(0, self)

        self.filesystem = client.FileSystem("root://localhost//tmp")
        self.create_event_file()

    def run(self):
        """
        Main function that will load all the necessary modules and start
        taking data.
        """
        self.log.info("Running muon detector")

        self.gps_module.start()
        self.adc_module.start()

        self.gps_module.join()
        self.adc_module.join()

    def on_event(self, data):
        """
        Do something when we get an event from the ADC
        """
        event = Event(data, self.gps_module.current_timestamp,
                      self.gps_module.current_lat,
                      self.gps_module.current_lon)

        self.log.debug("Got response from ADC: %s" % event)

        # Dump the event to a file
        print(str(event), file=open('/tmp/event.txt', 'a'))

    def on_gpgga(self, gpgga):
        """
        Do something when we get a GPS pulse
        """
        self.log.debug("on_gpgga()")

    def create_event_file(self):
        """
        Creates an event file in the xrootd filesystem (if it doesn't exist)
        """
        status, statinfo = self.filesystem.stat("/tmp/event.txt")
        if statinfo is None:
            with client.File() as f:
                f.open("root://localhost//tmp/event.txt", OpenFlags.NEW)
Example #4
    def __init__(self):
        self.log = Logger(__name__).setup()

        # Read our node ID
        self.node_id = eeprom.get_id()

        # Load the GPS module
        self.gps_module = gps.GPS(self)

        # Load the ADC module
        self.adc_module = adc.ADC(0, self)

        self.filesystem = client.FileSystem("root://localhost//tmp")
        self.create_event_file()
Example #5
    def __init__(self):
        self.log = Logger(__name__).setup()

        # Read our node ID
        self.node_id = eeprom.get_id()

        # Load the GPS module
        self.gps_module = gps.GPS(self)

        # Load the ADC module
        self.adc_module = adc.ADC(self)

        # Load the temperature and pressure sensors
        self.temp_module = temp.TemperatureSensor()
        self.pressure_module = pressure.PressureSensor()

        self.filesystem = client.FileSystem("root://localhost//tmp")
        self.create_event_file()

        # Register signal handler
        signal.signal(signal.SIGINT, self.signal_handler)
Example #6
class DriverFactory:
    """
    Singleton: ensures there is only one driver instance globally.
    """
    _instance = None
    _driver = None
    Chrome_driver_path = os.path.join(os.path.dirname(__file__),
                                      "drivers" + os.sep + "chromedriver.exe")
    logger = Logger()

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super().__new__(cls)
        return cls._instance

    @classmethod
    def get_driver(cls, browers="chrome"):
        if cls._driver is None:
            cls._driver = cls.create_driver(browers)
            cls.logger.info("Created " + browers + " browser")
        return cls._driver

    @classmethod
    def create_driver(cls, browers):
        driver = None
        browers = browers.lower()
        if browers == "chrome":
            driver = webdriver.Chrome(executable_path=cls.Chrome_driver_path)
        elif browers == "firefox":
            driver = webdriver.Firefox()
        elif browers == "ie":
            driver = webdriver.Ie()
        elif browers == "safari":
            driver = webdriver.Safari()
        else:
            cls.logger.warn("Invalid 'browers' argument: " + browers)
        return driver
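A hedged usage sketch for the factory above: because the driver is cached at class level, repeated calls return the same browser (assumes selenium and a matching chromedriver are installed):

# both calls hit the same class-level cache, so only one browser is launched
d1 = DriverFactory.get_driver("chrome")
d2 = DriverFactory.get_driver("chrome")
assert d1 is d2

d1.get("https://example.com")
d1.quit()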
Example #7
 def clear_trash(self):
     """Finds trash mobs closest to the current fleet location and battles
     them until the boss spawns
     """
     while self.kills_needed > 0:
         blacklist = []
         tries = 0
         if self.resume_previous_sortie:
             self.resume_previous_sortie = False
             Utils.find_and_touch('combat_attack')
             Utils.script_sleep(2)
         else:
             self.avoided_ambush = True
         while not Utils.exists('combat_battle_start'):
             if Utils.find_and_touch('combat_evade'):
                 if Utils.wait_for_exist('combat_battle_start', 3):
                     self.avoided_ambush = False
                 else:
                     Logger.log_msg('Successfully avoided ambush.')
             elif Utils.find_and_touch('combat_items_received'):
                 pass
             else:
                 enemy_coord = self.get_closest_enemy()
                 if tries > 2:
                     blacklist.append(enemy_coord)
                     enemy_coord = self.get_closest_enemy(blacklist)
                 Logger.log_msg(
                     'Navigating to enemy fleet at {}'.format(enemy_coord))
                 Utils.touch(enemy_coord)
                 tries += 1
                 Utils.script_sleep(5)
         if self.conduct_prebattle_check():
             if self.conduct_battle():
                 self.need_to_refocus = True
             else:
                 self.resume_previous_sortie = True
                 while not (Utils.exists('home_menu_build')):
                     Utils.touch_randomly(self.region['nav_back'])
                 # Add logic for retirement here?
                 return False
         if self.avoided_ambush:
             self.kills_needed -= 1
         Logger.log_msg('Kills left for boss to spawn: {}'.format(
             self.kills_needed))
     Utils.script_sleep(1)
     return True
Example #8
    def read(self):
        backup_config = deepcopy(self.__dict__)
        config = configparser.ConfigParser()
        config.read(self.config_file)
        self.network['service'] = config.get('Network', 'Service')

        if config.getboolean('Combat', 'Enabled'):
            self._read_combat(config)
        else:
            self.combat = {'enabled': False}

        self.commissions['enabled'] = config.getboolean('Modules', 'Commissions')
        self.enhancement['enabled'] = config.getboolean('Modules', 'Enhancement')
        self.missions['enabled'] = config.getboolean('Modules', 'Missions')
        self.retirement['enabled'] = config.getboolean('Modules', 'Retirement')

        if config.getboolean('Events', 'Enabled'):
            self._read_event(config)
        else:
            self.events = {'enabled': False}

        self.validate()
        if (self.ok and not self.initialized):
            Logger.log_msg("Starting ALAuto!")
            self.initialized = True
            self.changed = True
        elif (not self.ok and not self.initialized):
            Logger.log_error("Invalid config. Please check your config file.")
            sys.exit(1)
        elif (not self.ok and self.initialized):
            Logger.log_warning("Config change detected, but with problems. Rolling back config.")
            self._rollback_config(backup_config)
        elif (self.ok and self.initialized):
            if backup_config != self.__dict__:
                Logger.log_warning("Config change detected. Hot-reloading.")
                self.changed = True
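The read/validate/rollback flow above generalizes to a small pattern; this is an illustrative sketch only, with HotConfig, read_new_values, and validate as invented names rather than the project's API:

from copy import deepcopy


class HotConfig(object):
    def __init__(self):
        self.ok = True
        self.initialized = False
        self.changed = False

    def _rollback_config(self, backup):
        # restore every attribute captured before the reload
        self.__dict__ = deepcopy(backup)

    def reload(self, read_new_values, validate):
        backup = deepcopy(self.__dict__)
        read_new_values(self)    # mutate self from the config file
        self.ok = validate(self)
        if not self.ok and self.initialized:
            # keep running on the last known-good config
            self._rollback_config(backup)
        elif self.ok and backup != self.__dict__:
            self.changed = True  # signal a hot reload to the caller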
Example #9
def startup(spider):
    try:
        Logger.info("Starting sequential crawl with [%d] threads..." % spider.max_thread)
        task_list = []
        with ThreadPoolExecutor(spider.max_thread) as executor:
            for channel in spider.channels:
                # retry tasks that previously failed
                task = executor.submit(retry_failed, spider, channel)
                task_list.append(task)
                # normal crawl
                task = executor.submit(get_page, spider, channel)
                task_list.append(task)
            for task in as_completed(task_list):
                Logger.info("Thread [%s] finished" % str(task))
    except Exception as e:
        Logger.error("Thread pool crawl error: {} \n {}".format(str(e),
                                                                traceback.format_exc()))
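A standalone sketch of the submit/as_completed pattern used above, stdlib only; channel names and worker bodies are placeholders:

from concurrent.futures import ThreadPoolExecutor, as_completed


def retry(channel):
    return "retried %s" % channel


def fetch(channel):
    return "fetched %s" % channel


channels = ["news", "sports", "tech"]
tasks = []
with ThreadPoolExecutor(max_workers=4) as executor:
    for channel in channels:
        # one retry task and one normal fetch task per channel
        tasks.append(executor.submit(retry, channel))
        tasks.append(executor.submit(fetch, channel))
    for task in as_completed(tasks):
        print(task.result())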
Example #10
    def build_agents(self):
        num_agents = self.env.get_num_agents()
        self.agents = []

        Logger.print('')
        Logger.print('Num Agents: {:d}'.format(num_agents))

        agent_files = self.arg_parser.parse_strings('agent_files')
        assert(len(agent_files) == num_agents or len(agent_files) == 0)

        model_files = self.arg_parser.parse_strings('model_files')
        assert(len(model_files) == num_agents or len(model_files) == 0)

        output_path = self.arg_parser.parse_string('output_path')
        int_output_path = self.arg_parser.parse_string('int_output_path')

        for i in range(num_agents):
            curr_file = agent_files[i]
            curr_agent = self._build_agent(i, curr_file)

            if curr_agent is not None:
                curr_agent.output_dir = output_path
                curr_agent.int_output_dir = int_output_path
                Logger.print(str(curr_agent))

                if (len(model_files) > 0):
                    curr_model_file = model_files[i]
                    if curr_model_file != 'none':
                        curr_agent.load_model(curr_model_file)

            self.agents.append(curr_agent)
            Logger.print('')

        self.set_enable_training(self.enable_training)

        return
Example #11
    def enhance_ship(self):
        """
        Method that selects the first (leftmost of the first row) favorite ship and proceeds to enhance her.
        """

        #selects ship
        Utils.touch_randomly(self.region['first_favorite_ship'])
        Utils.script_sleep(1)

        while True:
            Utils.update_screen()

            if Utils.find("enhancement/menu_enhance"):
                Logger.log_debug("Filling with ships.")
                #taps the "fill" button
                Utils.touch_randomly(self.region['fill_button'])
                Utils.update_screen()
            if Utils.find("enhancement/alert_no_items", 0.85):
                Logger.log_warning("Not enough ships to enhance.")
                break
            if Utils.find("enhancement/menu_level", 0.8):
                self.handle_retirement()
                Logger.log_msg("Successfully finished enhancing.")
                break
            if Utils.find("enhancement/menu_details"):
                Logger.log_debug("Opening enhance menu.")
                if not Utils.find("enhancement/menu_retrofit", 0.9):
                    Utils.touch_randomly(
                        self.region['enhance_tab_normal_ship'])
                else:
                    Utils.touch_randomly(self.region['enhance_tab_retro_ship'])
                continue

        Utils.touch_randomly(self.region['button_go_back'])
        Utils.script_sleep(1)
        return
Example #12
    def checkUpdate(self):
        version = ''
        latest_version = ''
        _file = open('version.txt', 'r')

        if self.config.updates['channel'] == 'Release':
            version = _file.readline().strip()  # drop the newline so the comparison with tag_name works

            try:
                with request.urlopen(
                        "https://api.github.com/repos/egoistically/alauto/releases/latest"
                ) as f:
                    _json = json.loads(f.read().decode('utf-8'))
                    latest_version = _json["tag_name"]
            except error.HTTPError as e:
                Logger.log_error("Couldn't check for updates, {}.".format(e))

        else:
            version = _file.readlines()[1].strip()

            try:
                with request.urlopen(
                        "https://raw.githubusercontent.com/Egoistically/ALAuto/master/version.txt"
                ) as f:
                    _f = f.read().decode('utf-8')
                    latest_version = _f.splitlines()[1]
            except error.HTTPError as e:
                Logger.log_error("Couldn't check for updates, {}.".format(e))

        _file.close()

        if version != latest_version:
            Logger.log_debug("Current version: " + version)
            Logger.log_debug("Latest version: " + latest_version)

            return True
Example #13
def lambda_handler(event, context):
    dt = datetime.utcnow()
    logstream = LOG_STREAM.format("OpsAutomatorMain", dt.year, dt.month, dt.day)

    with Logger(logstream=logstream, context=context, buffersize=20) as logger:

        logger.info("Ops Automator, version %version%")

        for handler_name in handlers.all_handlers():

            if handlers.get_class_for_handler(handler_name).is_handling_request(event):
                handler = handlers.create_handler(handler_name, event, context)
                logger.info("Handler is {}", handler_name)
                try:
                    result = handler.handle_request()
                    logger.info(MSG_REQUEST_HANLED, handler_name, (datetime.utcnow() - dt).total_seconds())
                    return safe_dict(result)
                except Exception as e:
                    logger.error(MSG_ERR_HANDLING_REQUEST, safe_json(event, indent=2), handler_name, e, traceback.format_exc())

                return

        logger.error(MSG_NO_REQUEST_HANDLER, safe_json(event, indent=2))
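The loop above implements "first handler class that claims the event wins". A generic sketch of that dispatch shape; the handler class and event format are invented for illustration:

class PingHandler(object):
    @staticmethod
    def is_handling_request(event):
        return event.get("type") == "ping"

    def __init__(self, event, context):
        self.event = event
        self.context = context

    def handle_request(self):
        return {"pong": True}


HANDLERS = [PingHandler]


def dispatch(event, context=None):
    for handler_type in HANDLERS:
        # the first class that claims the event handles it
        if handler_type.is_handling_request(event):
            return handler_type(event, context).handle_request()
    raise ValueError("no handler for event %r" % event)


print(dispatch({"type": "ping"}))  # {'pong': True}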
Example #14
    def collect_dorm_balloons(self):
        """
        This method finds and collects all the dorm tokens and affinity points visible to the script.
        The various swipes may not work if there is a shipgirl at the starting point of the swipe.
        For this reason the wrapper to this method iterates its cycle three times, refreshing the dorm.
        """
        Utils.script_sleep(1)
        # tap dorm eye in order to hide UI
        Utils.touch_randomly(self.region["dorm_eye_button"])
        Logger.log_debug("Collecting all visible dorm tokens/affinity points.")

        for i in range(0, 4):
            Utils.wait_update_screen(1)
            # since a rather low similarity is used, the variable j ensures a finite loop
            j = 0
            while Utils.find_and_touch("headquarters/dorm_token", 0.75) and j < 5:
                Logger.log_msg("Collected dorm token.")
                Utils.wait_update_screen()
                j += 1
            j = 0
            while Utils.find_and_touch("headquarters/affinity_point", 0.75) and j < 5:
                Logger.log_msg("Collected affinity points.")
                Utils.wait_update_screen()
                j += 1
            if i == 0:
                # swipe right and refresh
                Utils.swipe(960, 540, 560, 540, 300)
                continue
            if i == 1:
                # swipe left (also countering the previous swipe) and refresh
                Utils.swipe(960, 540, 1760, 540, 300)
                continue
            if i == 2:
                # undo previous swipe
                Utils.swipe(960, 540, 560, 540, 300)
                # swipe up and refresh
                Utils.swipe(960, 540, 960, 790, 300)
                continue
            if i == 3:
                # swipe bottom (also countering the previous swipe) and refresh
                Utils.swipe(960, 540, 960, 40, 300)
                continue

        # restore UI
        Utils.touch_randomly(self.region["dorm_eye_button"])
Example #15
    def urgent_handler(self):
        Utils.touch_randomly(self.region["urgent_tab"])

        while True:
            Utils.update_screen()

            if Utils.find_and_touch("commission/commission_status"):
                Logger.log_msg("Found status indicator on urgent commission.")
                if not self.start_commission():
                    Logger.log_msg("No more commissions to start.")
                    return False
            else:
                Utils.touch_randomly(self.region["daily_tab"])
                Logger.log_msg("No urgent commissions left.")
                break

        Utils.script_sleep(1)
        return True
Example #16
    def unable_handler(self, coords, boss=False):
        """
        Method called when the path to the target (boss fleet or mystery node) is obstructed by mobs:
        it proceeds to switch targets to the mobs which are blocking the path.

        Args:
            coords (list): coordinate_x, coordinate_y. These coordinates describe the target's location.
            boss (bool): whether the unreachable target is the boss fleet.
        """
        if boss:
            Logger.log_debug("Unable to reach boss function started.")
        else:
            Logger.log_debug(
                "Unable to reach selected target function started.")
        self.blacklist.clear()
        closest_to_unreachable_target = self.get_closest_target(self.blacklist,
                                                                coords,
                                                                boss=boss)

        Utils.touch(closest_to_unreachable_target)
        Utils.update_screen()

        if Utils.find("combat/alert_unable_reach"):
            Logger.log_warning("Unable to reach next to selected target.")
            self.blacklist.append(closest_to_unreachable_target[0:2])

            while True:
                closest_enemy = self.get_closest_target(self.blacklist)
                Utils.touch(closest_enemy)
                Utils.update_screen()

                if Utils.find("combat/alert_unable_reach"):
                    self.blacklist.append(closest_enemy[0:2])
                else:
                    break

            self.movement_handler(closest_enemy)
            if not self.battle_handler():
                return False
            return True
        else:
            self.movement_handler(closest_to_unreachable_target)
            if not self.battle_handler():
                return False
            return True
Example #17
def lambda_handler(event, context):
    try:
        dt = datetime.utcnow()
        log_stream = LOG_STREAM.format(dt.year, dt.month, dt.day)
        result = {}
        with Logger(logstream=log_stream, buffersize=20, context=context,
                    debug=util.as_bool(os.getenv(configuration.ENV_TRACE, False))) as logger:

            # logger.info("InstanceScheduler, Running locally!!")
            # logger.info("InstanceScheduler, version {}".format(VERSION))

            logger.debug("Event is {}", util.safe_json(event, indent=3))

            for handler_type in [SchedulerRequestHandler,
                                 SchedulerSetupHandler,
                                 ScheduleResourceHandler,
                                 AdminCliRequestHandler,
                                 CloudWatchEventHandler]:

                # logger.info("handler_type: {}", handler_type)

                if handler_type.is_handling_request(event):
                    start = time()
                    handler = handler_type(event, context)
                    logger.info("Handler is {}".format(handler_type.__name__))
                    try:
                        result = handler.handle_request()
                    except Exception as e:
                        logger.error("Error handling request {} by handler {}: ({})\n{}", json.dumps(event), handler_type.__name__,
                                     e, traceback.format_exc())
                    execution_time = round(float((time() - start)), 3)
                    logger.info("Handling took {} seconds", execution_time)
                    return result
            logger.debug("Request was not handled, no handler was able to handle this type of request {}", json.dumps(event))
    finally:
        configuration.unload_scheduler_configuration()
Example #18
class AuthServer(RNServer):
    logger = Logger('AuthServer')

    def __init__(self,
                 ip='127.0.0.1',
                 port=1001,
                 backlog=10000,
                 password=b'3.25 ND1',
                 database=None):
        super().__init__((ip, port), backlog, password)

        self.database = database

        self.add_handler(RNEvent.UserPacket, self.handle_packet)
        self.auth_handlers = {}

        self.register_handler(PacketHeaders.HANDSHAKE.value, Handshake)
        self.register_handler(PacketHeaders.CLIENT_LOGIN_REQ.value,
                              ClientLoginRequest)

    def handle_packet(self, packet, address):
        header = packet[0:8]
        if header in self.auth_handlers:
            self.auth_handlers[header].database = self.database
            res = self.auth_handlers[header].construct_packet(self, packet)
            if res is not None:
                self.send(res, address)
            else:
                self.logger.warn(
                    'unable to construct packet for header-{}'.format(header))
        else:
            self.logger.warn(
                'no registered handlers found for header-{}'.format(header))

    def register_handler(self, header, func):
        self.auth_handlers[header] = func
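handle_packet above is a dict lookup keyed on the packet's first 8 bytes; a self-contained sketch with a made-up header value:

handlers = {}


def register_handler(header, func):
    handlers[header] = func


def handle_packet(packet):
    header = packet[0:8]  # the first 8 bytes select the handler
    if header in handlers:
        return handlers[header](packet)
    print("no registered handler for header %r" % header)


register_handler(b"HANDSHK\x00", lambda packet: b"HELLO")
print(handle_packet(b"HANDSHK\x00payload"))  # b'HELLO'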
Example #19
 def get_datas(self):
     '''
     Method for reading the test data.
     :return: a list of dicts, one per data row, keyed by the header row
     '''
     # build an empty result list
     datas = []
     try:
         # open the specified file
         xl = xlrd.open_workbook(self.file_path)
         # get the target sheet by index
         sheet = xl.sheet_by_index(self.sheet_id)
         items = sheet.row_values(0)
         # iterate over the data rows of the sheet
         for nrow in range(1, sheet.nrows):
             # one dict per row
             data = dict()
             values = sheet.row_values(nrow)
             for ncol in range(0, len(items)):
                 data[items[ncol]] = values[ncol]
             datas.append(data)
     except Exception as e:
         Logger().error("Failed to read file: {}".format(e))
     return datas
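A compact standalone version of the same rows-to-dicts conversion, assuming xlrd is installed and an .xls file whose first row holds the column names:

import xlrd


def read_rows(path, sheet_index=0):
    """Return a list of dicts, one per data row, keyed by the header row."""
    sheet = xlrd.open_workbook(path).sheet_by_index(sheet_index)
    headers = sheet.row_values(0)
    return [dict(zip(headers, sheet.row_values(n)))
            for n in range(1, sheet.nrows)]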
Example #20
def init_logger():
    log_level = getattr(logging, config['LOGGING']['LEVEL'], logging.INFO)

    Logger.setLevel(log_level)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter(
            '[%(levelname)s -> %(name)s] at %(asctime)s in %(filename)s: %(lineno)s - %(message)s'
        ))

    Logger.addHandler(stream_handler)

    logging.getLogger(
        'sqlalchemy.engine.base.Engine').handlers = Logger.handlers

    app.logger.handlers = Logger.handlers
    app.logger.setLevel(log_level)

    Logger.info('Initializing logger...')
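The same wiring using only stdlib logging names, on the assumption that the module-level Logger above is a pre-configured logging.Logger:

import logging

log = logging.getLogger("app")
log.setLevel(logging.INFO)

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '[%(levelname)s -> %(name)s] at %(asctime)s in %(filename)s: %(lineno)s - %(message)s'
))
log.addHandler(handler)

# route SQLAlchemy's engine logs through the same handlers
logging.getLogger('sqlalchemy.engine.base.Engine').handlers = log.handlers

log.info('Initializing logger...')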
Example #21
    def print_stats(self):
        """Prints a summary of all the stats to console.
        """
        delta = datetime.now() - self.start_time
        hours = delta.total_seconds() / 3600

        if self.config.commissions['enabled']:
            Logger.log_success("Commissions sent: {} / received: {}".format(
                self._pretty_perhour(self.commissions_started, hours),
                self._pretty_perhour(self.commissions_received, hours)))

        if self.config.combat['enabled']:
            Logger.log_success("Combat done: {} / attempted: {}".format(
                self._pretty_perhour(self.combat_done, hours),
                self._pretty_perhour(self.combat_attempted, hours)))

        Logger.log_success(
            "ALAuto has been running for {} (started on {})".format(
                self._pretty_timedelta(delta),
                self.start_time.strftime('%Y-%m-%d %H:%M:%S')))
Example #22
def retry_failed(spider, channel):
    Logger.info("Starting retry tasks...")
    try:
        retry_list = mysql.query_toretry_task(channel.pk_channel)
        if len(retry_list) > 0:
            _chrome = chrome.Chrome()
            for retry_info in retry_list:
                get_article(_chrome, retry_info["title"],
                            retry_info["src_url"], retry_info["pub_time"],
                            spider, channel)
                # if the task did not fail again (retry_info not incremented), delete it
                mysql.delete_toretry_task(channel.pk_channel,
                                          retry_info["src_url"],
                                          retry_info["total_times"])
            # stop tasks that have been retried more than 5 times
            mysql.stop_toretry_task(channel.pk_channel)
            _chrome.quit()
        Logger.info("Retry tasks finished")
    except Exception as e:
        Logger.error("Retry task error: {} \n {}".format(str(e), traceback.format_exc()))
Example #23
def run(dir, inpath):
    if not os.path.exists(dir):
        os.makedirs(dir)
    logfile = Logger(dir + '/qnrsearch.log')
    logfile.open()
    try:
        run_sieves(
            [
                (dnareader, {'item_limit': 0}),
                (hmmsearch, {'model_path': sys.path[0]+'/hmm_models/model.hmm', 'hmmsearch_out': dir+'/hmmsearch_out', 'write_only_domain': True}),
                (sga, {'error_rate': 0.05, 'min_assembly_overlap': 20, 'min_merge_overlap': 20, 'resolve_small': 5 })
            ],
            ['', dir+'/fragments.db', dir+'/fragments_passed.db', dir+'/clusters.db'],   #dbs
            [inpath, dir+'/dnareader.pfa', dir+'/fragments_passed.nfa', dir+'/clusters.nfa'],   #files
            logfile,
            level,
            0,2
        )
    finally:
        logfile.close()
Example #24
    def __create_buddies(self):
        Logger.get_logger().info("Parsing buddies data")

        data = {name: [] for name in self.names}

        for _idx, record in enumerate(tqdm(self._raw_data, disable=not self._verbose)):
            if record["buddies"] is None:
                Logger.get_logger().debug("Buddies found to be None")
                continue

            buddies = [name.lower() for name in record["buddies"]]
            ts = record["timestamp"]

            for name in buddies:
                data[name].append(pendulum.parse(ts))

        Logger.get_logger().info("Generating buddies")
        for name in tqdm(data, disable=not self._verbose):
            data[name] = Buddy(data[name])

        return data
Example #25
    def combat_handler(self):
        Logger.log_msg("Starting combat.")
        Utils.touch_randomly(self.region['menu_combat_start'])
        Utils.script_sleep(4)

        while True:
            Utils.wait_update_screen(1)

            if Utils.find("event/button_no"):
                Utils.touch_randomly(self.region['combat_button_no'])
                Utils.script_sleep(1)
                continue
            if Utils.find("combat/combat_pause", 0.7):
                Logger.log_debug("In battle.")
                Utils.script_sleep(5)
                continue
            if Utils.find("combat/menu_touch2continue"):
                Utils.touch_randomly(self.region['tap_to_continue'])
                continue
            if Utils.find("menu/item_found"):
                Utils.touch_randomly(self.region['tap_to_continue'])
                Utils.script_sleep(1)
                continue
            if Utils.find("combat/button_retry"):
                Logger.log_msg("Combat ended.")
                Utils.touch_randomly(self.region['combat_end_confirm'])
                self.stats.increment_combat_done()
                Utils.script_sleep(1)
                return
            if Utils.find("combat/commander"):
                # prevents fleet with submarines from getting stuck at combat end screen
                Utils.touch_randomly(
                    self.region["combat_dismiss_surface_fleet_summary"])
                Utils.script_sleep(1)
                continue
            if Utils.find("combat/menu_combat_finished"):
                Utils.touch_randomly(self.region['dismiss_combat_finished'])
                Utils.script_sleep(1)
                continue
Example #26
    def init_screencap_mode(cls, mode):
        consts = UtilConsts.ScreenCapMode

        cls.screencap_mode = mode

        if cls.screencap_mode == consts.ASCREENCAP:
            # Prepare for ascreencap, push the required libraries
            Adb.exec_out('rm /data/local/tmp/ascreencap')
            cpuArc = Adb.exec_out('getprop ro.product.cpu.abi').decode('utf-8').strip()
            sdkVer = int(Adb.exec_out('getprop ro.build.version.sdk').decode('utf-8').strip())
            ascreencaplib = 'ascreencap_{}'.format(cpuArc)
            if sdkVer in range(21, 26) and os.path.isfile(ascreencaplib):
                Adb.cmd('push {} /data/local/tmp/ascreencap'.format(ascreencaplib))
            else:
                Logger.log_warning(
                    'No suitable version of aScreenCap lib is available locally, using ascreencap_local...')
                if os.path.isfile('ascreencap_local'):
                    Adb.cmd('push ascreencap_local /data/local/tmp/ascreencap')
                else:
                    Logger.log_error(
                        'File "ascreencap_local" not found. Please download the appropriate version of aScreenCap for your device from github.com/ClnViewer/Android-fast-screen-capture and save it as "ascreencap_local"')
                    Logger.log_warning('Since aScreenCap is not ready, falling back to normal adb screencap')
                    Utils.useAScreenCap = False
            Adb.shell('chmod 0777 /data/local/tmp/ascreencap')
Example #27
    def battle_handler(self, boss=False):
        Logger.log_msg("Starting combat.")

        while not Utils.find("combat/menu_loading", 0.8):
            Utils.update_screen()

            if Utils.find("combat/alert_morale_low") or Utils.find(
                    "menu/button_sort"):
                self.retreat_handler()
                return False
            else:
                Utils.touch_randomly(self.region["menu_combat_start"])
                Utils.script_sleep(1)

        Utils.script_sleep(4)

        while True:
            Utils.update_screen()

            if Utils.find("combat/alert_lock"):
                Logger.log_msg("Locking received ship.")
                Utils.touch_randomly(Region(1086, 739, 200, 55))
                continue
            if Utils.find("combat/combat_pause", 0.7):
                Logger.log_debug("In battle.")
                Utils.script_sleep(5)
                continue
            if Utils.find("combat/menu_touch2continue"):
                Utils.touch_randomly(Region(661, 840, 598, 203))
                continue
            if Utils.find("menu/item_found"):
                '''if boss:
                    DM.find_droped()
                '''
                Utils.touch_randomly(Region(661, 840, 598, 203))
                Utils.script_sleep(1)
                continue
            if Utils.find("menu/drop_ssr"):
                Logger.log_msg("Received SSR ship as drop.")
                Utils.touch_randomly(Region(1228, 103, 692, 735))
                continue
            if Utils.find("menu/drop_elite"):
                Logger.log_msg("Received ELITE ship as drop.")
                Utils.touch_randomly(Region(1228, 103, 692, 735))
                continue
            if Utils.find("combat/button_confirm"):
                Logger.log_msg("Combat ended.")
                Utils.touch_randomly(self.region["combat_end_confirm"])
                Utils.script_sleep(1)
                if boss:
                    return True
                Utils.update_screen()
            if Utils.find("commission/button_confirm"):
                Logger.log_msg("Found commission info message.")
                Utils.touch_randomly(self.region["combat_com_confirm"])
                continue
            if Utils.find("combat/button_retreat"):
                Utils.script_sleep(3)
                Utils.touch_randomly(self.region["hide_strat_menu"])
                return
            if Utils.find("combat/commander"):
                Utils.touch_randomly(self.region["combat_end_confirm"])
                continue
Example #28
import sys
import core.bonds

__all__ = ["Calculation",
           "CalculationCache",
           "CalculationSettings",
           "calculated",
           "count_frames",
           "calculated_frames",
           "calculate_cavities",
           "getresults",
           "delete_center_cavity_information",
           "timestamp",
           "calculate"]

logger = Logger("core.calculation")
logger.setstream("default", sys.stdout, Logger.WARNING)


class CalculationSettings(object):
    """
    Structure to store the parameters for one or more calculations.

    **Attributes:**
        `datasets` :
            Dictionary, which contains filenames (Strings) as keys.
            Each value is a list of Integers, which contains the frames.
            The value ``[-1]`` means 'all frames'.
        `resolution` :
            resolution parameter for the discretization
        `domains` :
Example #29
def main(args):

    #initial setup
    if args.checkpoint == '':
        args.checkpoint = "checkpoints1/ic19val_%s_bs_%d_ep_%d" % (
            args.arch, args.batch_size, args.n_epoch)
    if args.pretrain:
        if 'synth' in args.pretrain:
            args.checkpoint += "_pretrain_synth"
        else:
            args.checkpoint += "_pretrain_ic17"

    print(('checkpoint path: %s' % args.checkpoint))
    print(('init lr: %.8f' % args.lr))
    print('schedule: ', args.schedule)
    sys.stdout.flush()

    if not os.path.isdir(args.checkpoint):
        os.makedirs(args.checkpoint)

    kernel_num = 7
    min_scale = 0.4
    start_epoch = 0
    validation_split = 0.1
    random_seed = 42
    prev_val_loss = -1
    val_loss_list = []
    loggertf = tfLogger('./log/' + args.arch)
    #end

    #setup data loaders
    data_loader = IC19Loader(is_transform=True,
                             img_size=args.img_size,
                             kernel_num=kernel_num,
                             min_scale=min_scale)
    dataset_size = len(data_loader)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    np.random.seed(random_seed)
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    validate_sampler = SubsetRandomSampler(val_indices)

    train_loader = torch.utils.data.DataLoader(data_loader,
                                               batch_size=args.batch_size,
                                               num_workers=3,
                                               drop_last=True,
                                               pin_memory=True,
                                               sampler=train_sampler)

    validate_loader = torch.utils.data.DataLoader(data_loader,
                                                  batch_size=args.batch_size,
                                                  num_workers=3,
                                                  drop_last=True,
                                                  pin_memory=True,
                                                  sampler=validate_sampler)
    #end

    #Setup architecture and optimizer
    if args.arch == "resnet50":
        model = models.resnet50(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resnet101":
        model = models.resnet101(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resnet152":
        model = models.resnet152(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resPAnet50":
        model = models.resPAnet50(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resPAnet101":
        model = models.resPAnet101(pretrained=True, num_classes=kernel_num)
    elif args.arch == "resPAnet152":
        model = models.resPAnet152(pretrained=True, num_classes=kernel_num)

    model = torch.nn.DataParallel(model).cuda()

    if hasattr(model.module, 'optimizer'):
        optimizer = model.module.optimizer
    else:
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=0.99,
                                    weight_decay=5e-4)
    #end

    #options to resume/use pretrained model/train from scratch
    title = 'icdar2019MLT'
    if args.pretrain:
        print('Using pretrained model.')
        assert os.path.isfile(
            args.pretrain), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.pretrain)
        model.load_state_dict(checkpoint['state_dict'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.',
            'Validate Loss', 'Validate Acc', 'Validate IOU'
        ])
    elif args.resume:
        print('Resuming from checkpoint.')
        assert os.path.isfile(
            args.resume), 'Error: no checkpoint directory found!'
        checkpoint = torch.load(args.resume)
        start_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
                        title=title,
                        resume=True)
    else:
        print('Training from scratch.')
        logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names([
            'Learning Rate', 'Train Loss', 'Train Acc.', 'Train IOU.',
            'Validate Loss', 'Validate Acc', 'Validate IOU'
        ])
    #end

    #start training model
    for epoch in range(start_epoch, args.n_epoch):
        adjust_learning_rate(args, optimizer, epoch)
        print(('\nEpoch: [%d | %d] LR: %f' %
               (epoch + 1, args.n_epoch, optimizer.param_groups[0]['lr'])))

        train_loss, train_te_acc, train_ke_acc, train_te_iou, train_ke_iou = train(
            train_loader, model, dice_loss, optimizer, epoch, loggertf)
        val_loss, val_te_acc, val_ke_acc, val_te_iou, val_ke_iou = validate(
            validate_loader, model, dice_loss)

        #logging on tensorboard
        loggertf.scalar_summary('Training/Accuracy', train_te_acc, epoch + 1)
        loggertf.scalar_summary('Training/Loss', train_loss, epoch + 1)
        loggertf.scalar_summary('Training/IoU', train_te_iou, epoch + 1)
        loggertf.scalar_summary('Validation/Accuracy', val_te_acc, epoch + 1)
        loggertf.scalar_summary('Validation/Loss', val_loss, epoch + 1)
        loggertf.scalar_summary('Validation/IoU', val_te_iou, epoch + 1)
        #end

        #Boring Book Keeping
        print(("End of Epoch %d", epoch + 1))
        print((
            "Train Loss: {loss:.4f} | Train Acc: {acc: .4f} | Train IOU: {iou_t: .4f}"
            .format(loss=train_loss, acc=train_te_acc, iou_t=train_te_iou)))
        print((
            "Validation Loss: {loss:.4f} | Validation Acc: {acc: .4f} | Validation IOU: {iou_t: .4f}"
            .format(loss=val_loss, acc=val_te_acc, iou_t=val_te_iou)))
        #end

        #Saving improving and Best Models
        val_loss_list.append(val_loss)
        if (val_loss < prev_val_loss or prev_val_loss == -1):
            checkpointname = "{loss:.3f}".format(
                loss=val_loss) + "_epoch" + str(epoch +
                                                1) + "_checkpoint.pth.tar"
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'lr': args.lr,
                    'optimizer': optimizer.state_dict(),
                },
                checkpoint=args.checkpoint,
                filename=checkpointname)
        if (val_loss <= min(val_loss_list)):  # val_loss was just appended, so the best epoch attains the min
            checkpointname = "best_checkpoint.pth.tar"
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'lr': args.lr,
                    'optimizer': optimizer.state_dict(),
                },
                checkpoint=args.checkpoint,
                filename=checkpointname)
        #end
        prev_val_loss = val_loss
        logger.append([
            optimizer.param_groups[0]['lr'], train_loss, train_te_acc,
            train_te_iou, val_loss, val_te_acc, val_te_iou
        ])
    #end traing model
    logger.close()
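The index-shuffle train/validation split feeding SubsetRandomSampler above, shown in isolation; the TensorDataset is a stand-in for IC19Loader:

import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.sampler import SubsetRandomSampler

dataset = TensorDataset(torch.randn(100, 3))  # stand-in dataset

validation_split = 0.1
indices = list(range(len(dataset)))
split = int(np.floor(validation_split * len(dataset)))
np.random.seed(42)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]

# samplers pick rows by index, so one dataset backs both loaders
train_loader = DataLoader(dataset, batch_size=20,
                          sampler=SubsetRandomSampler(train_indices))
val_loader = DataLoader(dataset, batch_size=20,
                        sampler=SubsetRandomSampler(val_indices))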
Example #30
import base64
import os
import sys

from django.db.models import F, Q
from xos.config import Config
from observer.syncstep import SyncStep
from core.models import Service
from hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer
from util.logger import Logger, logging

# hpclibrary will be in steps/..
parentdir = os.path.join(os.path.dirname(__file__), "..")
sys.path.insert(0, parentdir)

from hpclib import HpcLibrary

logger = Logger(level=logging.INFO)


class SyncOriginServer(SyncStep, HpcLibrary):
    provides = [OriginServer]
    observes = OriginServer
    requested_interval = 0

    def __init__(self, **args):
        SyncStep.__init__(self, **args)
        HpcLibrary.__init__(self)

    def filter_hpc_service(self, objs):
        hpcService = self.get_hpc_service()

        return [
Example #31
 def __init__(self, schedule_task_queue):
     Thread.__init__(self)
     self.__logger = Logger.get(InfoTransfer.__name__)
     self.schedule_task_queue = schedule_task_queue
Example #32
 def run_update_check(self):
     if self.modules['updates']:
         if self.modules['updates'].checkUpdate():
             Logger.log_warning("A new release is available, please check the github.")
Example #33
                    help='Use the specified configuration file instead ' +
                         'of the default config.ini')
parser.add_argument('-d', '--debug',
                    help='Enables debugging logs.', action='store_true')
parser.add_argument('-l', '--legacy',
                    help='Enables sed usage.', action='store_true')
args = parser.parse_args()

# check args, and if none provided, load default config
if args:
    if args.config:
        config = Config(args.config)
    else:
        config = Config('config.ini')
    if args.debug:
        Logger.log_info("Enabled debugging.")
        Logger.enable_debugging(Logger)
    if args.legacy:
        Logger.log_info("Enabled sed usage.")
        Adb.enable_legacy(Adb)

script = ALAuto(config)
script.run_update_check()

Adb.service = config.network['service']
Adb.device = '-d' if (Adb.service == 'PHONE') else '-e'
adb = Adb()

if adb.init():
    Logger.log_msg('Successfully connected to the service.')
    output = Adb.exec_out('wm size').decode('utf-8').strip()
Example #34
File: init.py Project: wiseyee/stock
def init_data():
    """ Initialize the data. """
    print('Initialization of the data will take a long time, please wait')
    TradeCalendarUpdater().start()
    Logger.info('trade_calendar data has been initialized')
    StockBasicUpdater().start()
    Logger.info('stock_basic data has been initialized')
    StockCompanyUpdater().start()
    Logger.info('stock_company data has been initialized')
    StockMonthlyUpdater().start()
    Logger.info('stock_monthly data has been initialized')
    StockWeeklyUpdater().start()
    Logger.info('stock_weekly data has been initialized')
    StockDailyUpdater().start()
    Logger.info('stock_daily data has been initialized')
    DailyBasicUpdater().start()
    Logger.info('daily_basic data has been initialized')
    ConceptUpdater().start()
    Logger.info('concept data has been initialized')
    ConceptDetailUpdater().start()
    Logger.info('concept_detail data has been initialized')
    Logger.info('All initialization of data has been completed')
Example #35
File: curator.py Project: Tontonis/muons
 def __init__(self):
     self.log = Logger(__name__).setup()
Example #36
    def movement_handler(self, target_info):
        """
        Method that handles the fleet movement until it reach its target (mystery node or enemy node).
        If the coordinates are wrong, they will be blacklisted and another set of coordinates to work on is obtained.
        If the target is a mystery node and what is found is ammo, then the method will fall in the blacklist case
        and search for another enemy: this is inefficient and should be improved, but it works.

        Args:
            target_info (list): coordinate_x, coordinate_y, type. Describes the selected target.
        Returns:
            (int): 1 if a fight is needed, otherwise 0.
        """
        Logger.log_msg("Moving towards objective.")
        count = 0
        location = [target_info[0], target_info[1]]
        Utils.script_sleep(1)

        while True:
            Utils.update_screen()
            event = self.check_movement_threads()

            if event["combat/button_evade"]:
                Logger.log_msg("Ambush was found, trying to evade.")
                Utils.touch_randomly(self.region["combat_ambush_evade"])
                Utils.script_sleep(0.5)
                continue
            if event["combat/alert_failed_evade"]:
                Logger.log_warning("Failed to evade ambush.")
                Utils.touch_randomly(self.region["menu_combat_start"])
                self.battle_handler()
                continue
            if event["menu/item_found"]:
                Logger.log_msg("Item found on node.")
                Utils.touch_randomly(Region(661, 840, 598, 203))
                if Utils.find("combat/menu_emergency"):
                    Utils.script_sleep(1)
                    Utils.touch_randomly(self.region["hide_strat_menu"])
                if target_info[2] == "mystery_node":
                    Logger.log_msg("Target reached.")
                    return 0
                continue
            if event["menu/alert_info"]:
                Logger.log_debug("Found alert.")
                Utils.find_and_touch("menu/alert_close")
                continue
            if event["combat/menu_formation"] or event["combat/menu_loading"]:
                return 1
            else:
                if count != 0 and count % 3 == 0:
                    Utils.touch(location)
                if count > 21:
                    Logger.log_msg(
                        "Blacklisting location and searching for another enemy."
                    )
                    self.blacklist.append(location)
                    self.l.clear()

                    location = self.get_closest_target(self.blacklist)
                    count = 0
                count += 1
Example #37
def run_SAE_experiment(pretrain_lr=0.1, pretraining_epochs=3300,
                       finetune_lr=0.1, training_epochs=4e5,
                       L1_reg=0.0, L2_reg=1e-4,
                       dataset='KSC.pkl',
                       split_proportions=[6, 2, 2],
                       hidden_layers_sizes=[20],
                       corruption_levels=[0.],
                       batch_size=20,
                       log_file='log',
                       restart=False,
                       use_rate_schedule=True,
                       load_pretrained_weights=False):
    """
    Reproduce the paper...
    """
    assert not (restart and load_pretrained_weights)
    assert not (load_pretrained_weights and len(hidden_layers_sizes) != 5)
    assert len(hidden_layers_sizes) == len(corruption_levels), \
           "Error: hidden_layers_sizes and corruption_levels need to be of equal length"

    pretrain_rate_decay = (type(pretrain_lr)==tuple)
    train_rate_decay = (type(finetune_lr)==tuple)
    assert pretrain_rate_decay or type(pretrain_lr)==float
    assert train_rate_decay or type(finetune_lr)==float
    assert not (use_rate_schedule and train_rate_decay), ('Error:',
      'Can not use adaptive rate schedule and linear rate schedule together' )
    
    # cast number of epochs to int
    pretraining_epochs = int(pretraining_epochs)
    training_epochs = int(training_epochs)
    
    #check for linear rate schedules
    if pretrain_rate_decay:
      linear_pretrain_rates = True
      pretrain_rates = numpy.linspace(pretrain_lr[0],pretrain_lr[1],pretraining_epochs)
    else:
      pretrain_rates = [pretrain_lr]*pretraining_epochs
    
    if train_rate_decay:
      linear_train_rates = True
      train_rates = numpy.linspace(finetune_lr[0],finetune_lr[1],training_epochs)
    else:
      train_rates = [finetune_lr]*training_epochs

    #create a log object
    logger = Logger(log_file)
    
    #log run params
    if restart: logger.log("Restarting run using old best_model")
    logger.log("Running SAE Experiment...")
    logger.add_newline()
    logger.log("Runtime params:")
    logger.log("pretrain_lr=%s" % str(pretrain_lr))
    logger.log("pretraining_epochs=%d" % pretraining_epochs)
    logger.log("finetune_lr=%s" % str(finetune_lr))
    logger.log("training_epochs=%d" % training_epochs)
    logger.log("L1_reg=%f" % L1_reg)
    logger.log("L2_reg=%f" % L2_reg)
    logger.log("dataset=%s" % dataset)
    logger.log("split_proportions=%s" % str(split_proportions))
    logger.log("hidden_layers_sizes=%s" % str(hidden_layers_sizes))
    logger.log("corruption_levels=%s" % str(corruption_levels))
    logger.log("batch_size=%d" % batch_size)
    logger.log("use_rate_schedule=%s" % use_rate_schedule)
    logger.log("load_pretrained_weights=%s" % load_pretrained_weights)
    logger.add_newline()
    
    
    datasets = load_data(dataset,split_proportions,logger)
    
    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]    
    
    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size
    
    # numpy random generator    
    numpy_rng = numpy.random.RandomState(89677)
    logger.log( '... building the model')
    # construct the stacked denoising autoencoder class    
    #since labels were cast to int32 need to do this to get the shared variable 
    shared_train_set_y = train_set_y.owner.inputs[0]   
    if not (restart or load_pretrained_weights):
      sda = SdA(
          numpy_rng=numpy_rng,
          n_ins=train_set_x.get_value(borrow=True).shape[1],
          hidden_layers_sizes=hidden_layers_sizes,
          n_outs=numpy.unique(shared_train_set_y.get_value(borrow=True)).size,
          L1_reg=L1_reg,
          L2_reg=L2_reg
      )
    elif restart:
      logger.log("loading model from best_model.pkl")
      sda = cPickle.load(open('best_model.pkl','r'))
    else:  # load_pretrained_weights
      logger.log("loading model from pretrained_model.pkl")
      sda = cPickle.load(open('pretrained_model.pkl','r'))

    #create dictionary to store training stat accumulation arrays for easy pickling
    train_stat_dict = {}

    #########################
    # PRETRAINING THE MODEL #
    #########################
    pretraining_costs = [ [] for i in xrange(sda.n_layers) ] # average pretraining cost at each epoch
    train_stat_dict['pretraining_costs'] = pretraining_costs
    if not (restart or load_pretrained_weights or SKIP_PRETRAINING):
      logger.log( '... getting the pretraining functions')
      pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                  batch_size=batch_size)
      
      logger.log( '... pre-training the model')
      start_time = timeit.default_timer()
      
      ## Pre-train layer-wise    
      for i in xrange(sda.n_layers):
          # go through pretraining epochs
          for epoch in xrange(pretraining_epochs):
              # go through the training set
              c = []
              for batch_index in xrange(n_train_batches):
                  c.append(pretraining_fns[i](index=batch_index,
                           corruption=corruption_levels[i],
                           lr=pretrain_rates[epoch]))
              logger.log('Pre-training layer %i, epoch %d, cost ' % (i, epoch) )
              logger.log( str(numpy.mean(c)) )
              pretraining_costs[i].append(numpy.mean(c))
      
      end_time = timeit.default_timer()
      
      #save the pretrained model
      with open('pretrained_model.pkl', 'w') as f:
          cPickle.dump(sda, f)
      
      logger.log( 'The pretraining code for file ' +
                            os.path.split(__file__)[1] +
                            ' ran for %.2fm' % ((end_time - start_time) / 60.)
      )
    else:
      logger.log("skipping pretraining")
    
    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    logger.log( '... getting the finetuning functions')
    ( train_fn, validate_model_NLL, 
      validate_model_zero_one, test_model ) = sda.build_finetune_functions(
        datasets=datasets,
        batch_size=batch_size
        )

    logger.log( '... finetunning the model')
    # early-stopping parameters
    patience = 100 * n_train_batches  # look as this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
                            # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatche before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = timeit.default_timer()

    done_looping = False
    epoch = 0    
    iter = 0 # global minibatch iteration
    minibatch_avg_NLL = [] #array to accumulate NLL cost over over minibatches
    training_NLL = []     # average training NLL cost at each epoch (really after val_freq iters)
    validation_NLL = []  # average validation NLL cost at each epoch
    validation_zero_one = [] # average zero one cost at each epoch (% misclassified)
    train_stat_dict['training_NLL'] = training_NLL
    train_stat_dict['validation_NLL'] = validation_NLL
    train_stat_dict['validation_zero_one'] = validation_zero_one    
    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            iter += 1
            minibatch_avg_NLL.append(train_fn(minibatch_index, lr=train_rates[epoch - 1]))

            if iter % validation_frequency == 0:
                # validation zero one loss
                validation_zero_one_losses = validate_model_zero_one()
                validation_zero_one.append(numpy.mean(validation_zero_one_losses))

                # validation NLL cost
                validation_NLL_losses = validate_model_NLL()
                validation_NLL.append(numpy.mean(validation_NLL_losses))

                # training NLL cost
                training_NLL.append(numpy.mean(minibatch_avg_NLL))
                minibatch_avg_NLL = []  # reset the NLL accumulator

                logger.log('epoch %i, minibatch %i/%i:' % (epoch, minibatch_index + 1, n_train_batches))
                logger.log('\ttraining NLL loss: %f ' % training_NLL[-1])
                logger.log('\tvalidation NLL loss: %f ' % validation_NLL[-1])
                logger.log('\tvalidation zero one loss: %f %%' % (validation_zero_one[-1] * 100.))

                # if we got the best validation score until now
                if validation_zero_one[-1] < best_validation_loss:

                    # improve patience if loss improvement is good enough
                    if (
                        validation_zero_one[-1] < best_validation_loss *
                        improvement_threshold
                    ):
                        patience = max(patience, iter * patience_increase)
                    else:
                        print "improvement not good enough: %f" % (validation_zero_one[-1] / best_validation_loss)

                    # save best validation score and iteration number
                    best_validation_loss = validation_zero_one[-1]
                    best_iter = iter

                    # test it on the test set
                    test_zero_one_losses = test_model()
                    test_score = numpy.mean(test_zero_one_losses)
                    print '\t\ttest zero one loss of best model %f %%' % (test_score * 100.)

                    # save the best model
                    with open('best_model.pkl', 'w') as f:
                        cPickle.dump(sda, f)

            if patience <= iter:
                pass
                #done_looping = True
                #break
        if use_rate_schedule and epoch % 100 == 0:
            if validation_NLL[epoch - 100] - validation_NLL[epoch - 1] < 1e-4:
                finetune_lr = max(finetune_lr / 2., 1e-6)
                train_rates = [finetune_lr] * training_epochs
                logger.log("Reducing learning rate. new rate: %f" % finetune_lr)
        
    # save train_stat_dict to a .mat file
    sio.savemat('train_stats.mat', train_stat_dict)
    # with open('train_stat_dict.pkl', 'wb') as f:
    #     cPickle.dump(train_stat_dict, f)
    end_time = timeit.default_timer()
    logger.log(
        (
            'Optimization complete with best validation score of %f %%, '
            'on iteration %i, '
            'with test performance %f %%'
        )
        % (best_validation_loss * 100., best_iter, test_score * 100.)
    )
    logger.log('The training code for file ' +
               os.path.split(__file__)[1] +
               ' ran for %.2fm' % ((end_time - start_time) / 60.))

    logger.close()
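
# A self-contained sketch of the patience-based early-stopping schedule used
# in the loop above, stripped of the Theano machinery. The `validate`
# callable and the loop bounds are illustrative assumptions, not names from
# the original code.
def patience_schedule(validate, n_train_batches, max_epochs):
    patience = 100 * n_train_batches   # always look at this many minibatches
    patience_increase = 2.             # extend patience when a new best appears
    improvement_threshold = 0.995      # relative improvement that counts
    best_loss = float('inf')
    it = 0
    for epoch in range(max_epochs):
        for _ in range(n_train_batches):
            it += 1
            if it % n_train_batches == 0:  # validate once per epoch
                loss = validate()
                if loss < best_loss:
                    if loss < best_loss * improvement_threshold:
                        patience = max(patience, it * patience_increase)
                    best_loss = loss
        if patience <= it:
            return best_loss               # patience exhausted: stop early
    return best_loss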
Example #38
0
class MuonDetector(gps.GPSListener, adc.EventListener, object):
    """
    Main entry point into the muon detector program.
    """

    def __init__(self):
        self.log = Logger(__name__).setup()

        # Read our node ID
        self.node_id = eeprom.get_id()

        # Load the GPS module
        self.gps_module = gps.GPS(self)

        # Load the ADC module
        self.adc_module = adc.ADC(self)

        # Load the temperature and pressure sensors
        self.temp_module = temp.TemperatureSensor()
        self.pressure_module = pressure.PressureSensor()

        self.filesystem = client.FileSystem("root://localhost//tmp")
        self.create_event_file()

        # Register signal handler
        signal.signal(signal.SIGINT, self.signal_handler)

    def run(self):
        """
        Main function that will load all the necessary modules and start
        taking data.
        """
        self.gps_module.start()
        self.adc_module.start()

        self.log.info("Muon detector is running")

        # Wait until the process is killed
        signal.pause()

    def on_event(self, data, timestamp):
        """
        Do something when we get an event from the ADC
        """
        event = Event(self.node_id, data, timestamp,
                      self.gps_module.current_timestamp,
                      self.gps_module.current_lat,
                      self.gps_module.current_lon,
                      self.temp_module.read_temperature(),
                      self.temp_module.read_humidity(),
                      self.pressure_module.read_pressure())

        self.temp_module.read_firmware_version()
        self.log.debug("Got event from ADC: %s" % event)

        # Append the event to the event file
        with open('/tmp/event.txt', 'a') as f:
            print(json.dumps(event.__dict__), file=f)

    def on_gpgga(self, gpgga):
        """
        Do something when we get a GPS pulse
        """
        self.log.debug("on_gpgga(): %s %s %s" % (gpgga.timestamp,
                                                 gpgga.latitude,
                                                 gpgga.longitude))

    def create_event_file(self):
        """
        Creates an event file in the xrootd filesystem (if it doesn't exist)
        """
        self.log.debug("Creating initial event file... (did you remember to "
                       "start xrootd? ;)")

        status, statinfo = self.filesystem.stat("/tmp/event.txt")
        if statinfo is None:
            with client.File() as f:
                f.open("root://localhost//tmp/event.txt", OpenFlags.NEW)

    def signal_handler(self, signum, frame):
        """
        Called when the process receives SIGINT
        """
        self.log.info('Received Ctrl-C')
        self.adc_module.cleanup()
        sys.exit(0)
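
# Usage sketch (illustrative, not part of the original module): start the
# detector from a main guard; run() blocks in signal.pause() until SIGINT.
if __name__ == '__main__':
    MuonDetector().run()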
Example #39
0
from __future__ import absolute_import

from PyQt5 import QtCore, QtWidgets
import gr
from qtgr import GRWidget
import csv

from util.logger import Logger
import sys
import numpy as np
import os
from math import floor


logger = Logger("gui.histogram_widget")
logger.setstream("default", sys.stdout, Logger.DEBUG)


class GrHistogramWidget(GRWidget):
    def __init__(self, *args, **kwargs):
        super(GrHistogramWidget, self).__init__(*args, **kwargs)

        self.xvalues = None
        self.yvalues = None
        self.title = None
        self.datapoints = None

    def setdata(self, xvalues, yvalues, widths, title):
        self.xvalues = xvalues
        self.yvalues = yvalues
        self.widths = widths
        self.title = title
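
# Usage sketch (an illustrative assumption, not part of the original
# snippet): embed the widget in a minimal Qt application and hand it a
# single bin via setdata().
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    widget = GrHistogramWidget()
    widget.setdata(xvalues=np.array([0.5]), yvalues=np.array([10.0]),
                   widths=np.array([1.0]), title="counts per bin")
    widget.show()
    sys.exit(app.exec_())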
Example #40
0
    def combat_logic_wrapper(self):
        """Method that fires off the necessary child methods that encapsulates
        the entire action of sortieing combat fleets and resolving combat.

        Returns:
            int: 1 if the boss was defeated, 2 if morale is too low, and 3 if the dock is full.
        """
        self.exit = 0
        self.l.clear()
        self.blacklist.clear()

        while True:
            Utils.wait_update_screen()

            if Utils.find("menu/button_sort"):
                Utils.touch_randomly(Region(1326, 274, 35, 35))
                self.exit = 3
            if Utils.find("combat/alert_morale_low"):
                Utils.touch_randomly(Region(1326, 274, 35, 35))
                self.exit = 2
                break
            if Utils.find("commission/button_confirm"):
                Logger.log_msg("Found commission info message.")
                Utils.touch_randomly(self.region["combat_com_confirm"])
                continue
            if Utils.find("menu/button_battle"):
                Logger.log_debug("Found menu battle button.")
                Utils.touch_randomly(self.region["menu_button_battle"])
                Utils.wait_update_screen(1)
                continue
            if Utils.find("combat/menu_select_fleet"):
                Logger.log_debug("Found fleet select go button.")
                Utils.touch_randomly(self.region["fleet_menu_go"])
                continue
            if Utils.find("combat/button_go"):
                Logger.log_debug("Found map summary go button.")
                Utils.touch_randomly(self.region["map_summary_go"])
                continue
            if Utils.find("combat/button_retreat"):
                Logger.log_debug(
                    "Found retreat button, starting clear function.")
                if not self.clear_map():
                    self.stats.increment_combat_attempted()
                    break
            if self.exit == 1:
                self.stats.increment_combat_done()
                break
            if self.exit > 1:
                self.stats.increment_combat_attempted()
                break
            if Utils.find("menu/button_normal_mode"):
                Logger.log_debug("Disabling hard mode.")
                Utils.touch_randomly(Region(88, 990, 80, 40))
                Utils.wait_update_screen(1)
            if Utils.find_and_touch('maps/map_{}'.format(self.chapter_map),
                                    0.8):
                Logger.log_msg("Found specified map.")
                continue
            else:
                self.reach_map()
                continue

        Utils.script_sleep(1)
        Utils.menu_navigate("menu/button_battle")

        return self.exit
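
# Usage sketch (hypothetical caller, not from the original module): the
# documented return codes map naturally onto follow-up actions.
def handle_combat_result(result):
    # `result` is the value returned by combat_logic_wrapper()
    if result == 1:
        Logger.log_msg("Boss defeated; queueing next sortie.")
    elif result == 2:
        Logger.log_msg("Morale too low; pausing sorties.")
    elif result == 3:
        Logger.log_msg("Dock is full; retiring ships first.")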
Example #41
0
def run_deepNet(dataset='KSC.pkl',
                split_proportions=[6, 2, 2],
                hidden_sizes=[20],
                hidden_nonlinearity=lasagne.nonlinearities.rectify,
                dropout_probs=[0.5],
                learning_rate=[0.1, 0.1],  # (start, end) pair, interpolated over epochs
                momentum=[0.9, 0.9],       # (start, end) pair, interpolated over epochs
                num_epochs=int(5e4),
                minibatch_size=64,
                log_file='log'):
  
  #create a log object
  logger = Logger(log_file)
  
  #log run params
  logger.log("Running run_deepNet Experiment...")
  logger.add_newline()
  logger.log("Runtime params:")  
  logger.log("dataset=%s" % dataset)
  logger.log("split_proportions=%s" % str(split_proportions))
  logger.log("hidden_sizes=%s" % str(hidden_sizes))
  logger.log("hidden_nonlinearity=%s" % str(hidden_nonlinearity))
  logger.log("dropout_probs=%s" % str(dropout_probs))
  logger.log("learning_rate=%s" % str(learning_rate))
  logger.log("momentum=%s" % str(momentum))
  logger.log("num_epochs=%d" % num_epochs)
  logger.log("minibatch_size=%d" % minibatch_size)  
    
  #Load the data
  train_set, val_set, test_set = load_data(dataset, split_proportions,
                                           logger, shared=False)
  x_train, y_train = train_set
  x_val, y_val = val_set
  x_test, y_test = test_set
  # normalize the data to zero mean, unit variance (statistics from the training set)
  x_mean = np.mean(x_train)
  x_std = np.std(x_train)
  x_train = (x_train - x_mean) / x_std
  x_val = (x_val - x_mean) / x_std
  x_test = (x_test - x_mean) / x_std
      
  #prepare theano variables for inputs and targets
  input_var = T.matrix('inputs')
  target_var = T.ivector('targets')
  #build the model
  logger.log( '... building the model')
  input_size = x_train.shape[1]
  output_size = np.unique(y_train).size
  # net = build_network(input_var,
  #                     input_size,
  #                     hidden_sizes,
  #                     hidden_nonlinearity,
  #                     dropout_probs,
  #                     output_size)
  net = cPickle.load(open('best_model.pkl', 'rb'))  # warm-start from the saved model
  layers = lasagne.layers.get_all_layers(net)
  input_var = layers[0].input_var
  # create loss expression for training
  logger.log('... building expressions and compiling train functions')
  prediction = lasagne.layers.get_output(net)
  loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
  loss = loss.mean()
  #create update expressions for training
  params = lasagne.layers.get_all_params(net,trainable=True)
  # linearly interpolate the learning rate between its start and end values
  learning_rate = np.linspace(learning_rate[0], learning_rate[1], num_epochs)
  lr = theano.shared(np.array(learning_rate[0], dtype=theano.config.floatX))
  # linearly interpolate the momentum between its start and end values
  momentum = np.linspace(momentum[0], momentum[1], num_epochs)
  mom = theano.shared(np.array(momentum[0], dtype=theano.config.floatX))
  updates = lasagne.updates.nesterov_momentum(loss, params,
                                              learning_rate=lr,
                                              momentum=mom)
  # create loss expression for validation/testing
  test_prediction = lasagne.layers.get_output(net, deterministic=True)
  test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                          target_var)
  test_loss = test_loss.mean()
  # create an expression for the classification accuracy
  test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                    dtype=theano.config.floatX)
  # compile the training function
  train_fn = theano.function([input_var, target_var],
                             loss,
                             updates=updates)
  # compile a validation function for the validation loss and accuracy
  val_fn = theano.function([input_var, target_var],
                           [test_loss, test_acc])
  # train the model
  logger.log('... training the model')
  start_time = timeit.default_timer()
  best_validation_loss = np.inf
  training_NLL = []  # average training NLL cost at each epoch
  validation_NLL = []  # average validation NLL cost at each epoch
  validation_zero_one = []  # average zero-one cost at each epoch (% misclassified)
  train_stat_dict = {}
  train_stat_dict['training_NLL'] = training_NLL
  train_stat_dict['validation_NLL'] = validation_NLL
  train_stat_dict['validation_zero_one'] = validation_zero_one
    
  for epoch in xrange(num_epochs):
    #do a pass over the training data
    lr.set_value(learning_rate[epoch])
    mom.set_value(momentum[epoch])
    train_err=0
    train_batches=0
    for batch in iterate_minibatches(x_train, y_train,
                                     minibatch_size, shuffle=True):
      inputs, targets = batch
      train_err += train_fn(inputs, targets)
      train_batches += 1
    #do a pass over the validation data
    val_err = 0
    val_acc = 0
    val_batches = 0
    for batch in iterate_minibatches(x_val, y_val,
                                     minibatch_size, shuffle=False):
      inputs, targets = batch
      err, acc = val_fn(inputs, targets)
      val_err += err
      val_acc += acc
      val_batches += 1
    #record results
    training_NLL.append(train_err / train_batches)
    validation_NLL.append(val_err / val_batches)
    validation_zero_one.append(1-(val_acc/val_batches))
    logger.log( 'epoch %i:' % (epoch))
    logger.log('\ttraining NLL loss: %f ' % training_NLL[-1])
    logger.log('\tvalidation NLL loss: %f ' % validation_NLL[-1])
    logger.log('\tvalidation zero one loss: %f %%' % (validation_zero_one[-1] * 100.))
    # if we got the best validation score until now
    if validation_zero_one[-1] < best_validation_loss:
        # save best validation score and epoch number
        best_validation_loss = validation_zero_one[-1]
        best_epoch = epoch
        # save the best model
        with open('best_model.pkl', 'wb') as f:
            cPickle.dump(net, f)
    # update the best model in a sliding window looking back 50 epochs
    #window_start = max(len(validation_zero_one)-50,0)
    #window_end = len(validation_zero_one)
    #if validation_zero_one[-1] == min(validation_zero_one[window_start:window_end]):
        ## save best validation score and iteration number
        #best_window_validation_loss = validation_zero_one[-1]
        #best_window_epoch = epoch         
        ##save the best model
        #with open('best_window_model.pkl', 'w') as f:
                #cPickle.dump(net,f)
    if (epoch - best_epoch) > 1e4:
      logger.log("Early stopping...")
      break
                 
  ###### post training ######

  # save train_stat_dict to a .mat file
  sio.savemat('train_stats.mat', train_stat_dict)
  # with open('train_stat_dict.pkl', 'wb') as f:
  #     cPickle.dump(train_stat_dict, f)
    
  # After training, compute and print the test error.
  # load the best model
  logger.log("loading model from best_model.pkl")
  net = cPickle.load(open('best_model.pkl', 'rb'))
  # logger.log("loading model from best_window_model.pkl")
  # window_net = cPickle.load(open('best_window_model.pkl', 'rb'))
  test_err, test_acc = predict(net, x_test, y_test)
  test_score = 1 - test_acc
  # test_err, test_acc = predict(window_net, x_test, y_test)
  # window_test_score = 1 - test_acc
  end_time = timeit.default_timer()
  logger.log(
      (
          'Optimization complete with best validation score of %f %%, '
          'on epoch %i, '
          'with test performance %f %%'
      )
      % (best_validation_loss * 100., best_epoch, test_score * 100.)
  )    
  logger.log('The training code for file ' +
             os.path.split(__file__)[1] +
             ' ran for %.2fm' % ((end_time - start_time) / 60.))

  logger.close()
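
# `iterate_minibatches` is called in the training loop above but is not part
# of this snippet. A minimal sketch in the spirit of the standard Lasagne
# tutorial helper (an assumption about the missing function, not the
# author's exact code):
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    assert len(inputs) == len(targets)
    indices = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(indices)
    # drop the last partial batch, as the tutorial helper does
    for start in range(0, len(inputs) - batchsize + 1, batchsize):
        excerpt = indices[start:start + batchsize]
        yield inputs[excerpt], targets[excerpt]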