Example #1
def capture_screenshot(feed_url, arch_video_url):
    """
    Creates a screenshot for the specified feed.
    Returns the filename for the screenshot.
    """
    # def capture_screenshot(feed_url, arch_video_url):
    screenshot_file = arch_video_url.replace(".flv", ".jpg")
    proc = None
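    # Outside DEBUG mode, launch ffmpeg to grab a single frame from the MJPEG
    # feed and write it out as a JPEG thumbnail sized per ARCHIVE_THUMB_SIZE.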
    if not settings.DEBUG:
        proc = subprocess.Popen(
            [
                "ffmpeg",
                "-f",
                "mjpeg",
                "-i",
                feed_url,
                "-f",
                "image2",
                "-t",
                "0.001",
                "-s",
                settings.ARCHIVE_THUMB_SIZE,
                get_full_path(screenshot_file),
            ],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
    log("attempting to take screenshot for feed: " + screenshot_file)
    return (screenshot_file, proc)
Example #2
    def _walk_to(self, speed, lat, lng, alt):
        dist = distance(
            i2f(self.api._position_lat), i2f(self.api._position_lng), lat, lng)
        steps = (dist + 0.0) / (speed + 0.0)  # may be rational number
        intSteps = int(steps)
        residuum = steps - intSteps
        logger.log('[#] Walking from ' + str((i2f(self.api._position_lat), i2f(
            self.api._position_lng))) + " to " + str(str((lat, lng))) +
                   " for approx. " + str(format_time(ceil(steps))))
        if steps != 0:
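            # Per-step latitude/longitude deltas toward the destination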
            dLat = (lat - i2f(self.api._position_lat)) / steps
            dLng = (lng - i2f(self.api._position_lng)) / steps

            for i in range(intSteps):
                cLat = i2f(self.api._position_lat) + \
                    dLat + random_lat_long_delta()
                cLng = i2f(self.api._position_lng) + \
                    dLng + random_lat_long_delta()
                self.api.set_position(cLat, cLng, alt)
                self.bot.heartbeat()
                sleep(1)  # pause for one second between steps
                self._work_at_position(
                    i2f(self.api._position_lat), i2f(self.api._position_lng),
                    alt, False)

            self.api.set_position(lat, lng, alt)
            self.bot.heartbeat()
            logger.log("[#] Finished walking")
Example #3
    def get_player_info(self):
        response_dict = self.get_inventory()
        inventory_items = response_dict.get('responses', {}).get('GET_INVENTORY', {}).get(
            'inventory_delta', {}).get('inventory_items', {})
        if inventory_items:
            pokecount = 0
            itemcount = 1
            for item in inventory_items:
                # print('item {}'.format(item))
                playerdata = item.get('inventory_item_data', {}).get('player_stats')
                if playerdata:
                    nextlvlxp = (int(playerdata.get('next_level_xp', 0)) - int(playerdata.get('experience', 0)))

                    if 'level' in playerdata and 'experience' in playerdata:
                        logger.log(
                            'Level: {level}'.format(
                                **playerdata) +
                            ' (Next Level: {} XP)'.format(
                                nextlvlxp) +
                            ' (Total: {experience} XP)'
                            ''.format(**playerdata), 'cyan')

                    if 'pokemons_captured' in playerdata and 'poke_stop_visits' in playerdata:
                        logger.log(
                            'Pokemon Captured: '
                            '{pokemons_captured}'.format(
                                **playerdata) +
                            ' | Pokestops Visited: '
                            '{poke_stop_visits}'.format(
                                **playerdata), 'cyan')
Example #4
    def sceneQuality(name, anime=False):
        """
        Return The quality from the scene episode File
        """

        name = os.path.basename(name)

        checkName = lambda quality_list, func: func([re.search(x, name, re.I) for x in quality_list])
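        # checkName(patterns, any/all) is True when any/all of the regexes match the file name (case-insensitive)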

        if anime:
            dvdOptions = checkName(['dvd', 'dvdrip'], any)
            blueRayOptions = checkName(['bluray', 'blu-ray', 'BD'], any)
            sdOptions = checkName(['360p', '480p', '848x480', 'XviD'], any)
            hdOptions = checkName(['720p', '1280x720', '960x720'], any)
            fullHD = checkName(['1080p', '1920x1080'], any)

            if sdOptions and not blueRayOptions and not dvdOptions:
                return Quality.SDTV
            elif dvdOptions:
                return Quality.SDDVD
            elif hdOptions and not blueRayOptions and not fullHD:
                return Quality.HDTV
            elif fullHD and not blueRayOptions and not hdOptions:
                return Quality.FULLHDTV
            elif hdOptions and not blueRayOptions and not fullHD:
                return Quality.HDWEBDL
            elif blueRayOptions and hdOptions and not fullHD:
                return Quality.HDBLURAY
            elif blueRayOptions and fullHD and not hdOptions:
                return Quality.FULLHDBLURAY
            elif sickbeard.ANIME_TREAT_AS_HDTV:
                logger.log(u'Treating file: %s with "unknown" quality as HDTV per user settings' % name, logger.DEBUG)
                return Quality.HDTV
            else:
                return Quality.UNKNOWN

        if checkName(['(pdtv|hdtv|dsr|tvrip)([-]|.((aac|ac3|dd).?\d\.?\d.)*(xvid|x264|h.?264))'], all) and not checkName(['(720|1080|2160)[pi]'], all) \
                and not checkName(['hr.ws.pdtv.(x264|h.?264)'], any):
            return Quality.SDTV
        elif checkName(['web.?dl|web.?rip', 'xvid|x264|h.?264'], all) and not checkName(['(720|1080|2160)[pi]'], all):
            return Quality.SDTV
        elif checkName(['(dvd.?rip|b[r|d]rip)(.ws)?(.(xvid|divx|x264|h.?264))?'], any) and not checkName(['(720|1080|2160)[pi]'], all):
            return Quality.SDDVD
        elif checkName(['720p', 'hdtv', 'x264|h.?264'], all) or checkName(['hr.ws.pdtv.(x264|h.?264)'], any) \
                and not checkName(['(1080|2160)[pi]'], all):
            return Quality.HDTV
        elif checkName(['720p|1080i', 'hdtv', 'mpeg-?2'], all) or checkName(['1080[pi].hdtv', 'h.?264'], all):
            return Quality.RAWHDTV
        elif checkName(['1080p', 'hdtv', 'x264'], all):
            return Quality.FULLHDTV
        elif checkName(['720p', 'web.?dl|web.?rip'], all) or checkName(['720p', 'itunes', 'x264|h.?264'], all):
            return Quality.HDWEBDL
        elif checkName(['1080p', 'web.?dl|web.?rip'], all) or checkName(['1080p', 'itunes', 'x264|h.?264'], all):
            return Quality.FULLHDWEBDL
        elif checkName(['720p', 'blu.?ray|hddvd|b[r|d]rip', 'x264|h.?264'], all):
            return Quality.HDBLURAY
        elif checkName(['1080p', 'blu.?ray|hddvd|b[r|d]rip', 'x264|h.?264'], all):
            return Quality.FULLHDBLURAY
        else:
            return Quality.UNKNOWN
Example #5
    def take_step(self):
        position = (self.origin_lat, self.origin_lon, 0.0)

        self.api.set_position(*position)
        for step in range(self.steplimit2):
            # starting at 0 index
            logger.log('[#] Scanning area for objects ({} / {})'.format(
                (step + 1), self.steplimit**2))
            if self.config.debug:
                logger.log(
                    'steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(
                        self.steplimit2, self.x, self.y, self.pos, self.dx,
                        self.dy))
            # Scan location math
            if -self.steplimit2 / 2 < self.x <= self.steplimit2 / 2 and -self.steplimit2 / 2 < self.y <= self.steplimit2 / 2:
                position = (self.x * 0.0025 + self.origin_lat,
                            self.y * 0.0025 + self.origin_lon, 0)
                if self.config.walk > 0:
                    self._walk_to(self.config.walk, *position)
                else:
                    self.api.set_position(*position)
                print('[#] {}'.format(position))
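            # Square-spiral traversal: turn (swap and negate the deltas) at the spiral's corners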
            if self.x == self.y or self.x < 0 and self.x == -self.y or self.x > 0 and self.x == 1 - self.y:
                (self.dx, self.dy) = (-self.dy, self.dx)

            (self.x, self.y) = (self.x + self.dx, self.y + self.dy)

            self._work_at_position(position[0], position[1], position[2], True)
            sleep(10)
Example #6
 def parse_and_bind(self, string):
     u'''Parse and execute single line of a readline init file.'''
     try:
         log(u'parse_and_bind("%s")' % string)
         if string.startswith(u'#'):
             return
         if string.startswith(u'set'):
             m = re.compile(ur'set\s+([-a-zA-Z0-9]+)\s+(.+)\s*$').match(string)
             if m:
                 var_name = m.group(1)
                 val = m.group(2)
                 try:
                     setattr(self.mode, var_name.replace(u'-',u'_'), val)
                 except AttributeError:
                     log(u'unknown var="%s" val="%s"' % (var_name, val))
             else:
                 log(u'bad set "%s"' % string)
             return
         m = re.compile(ur'\s*(.+)\s*:\s*([-a-zA-Z]+)\s*$').match(string)
         if m:
             key = m.group(1)
             func_name = m.group(2)
             py_name = func_name.replace(u'-', u'_')
             try:
                 func = getattr(self.mode, py_name)
             except AttributeError:
                 log(u'unknown func key="%s" func="%s"' % (key, func_name))
                 if self.debug:
                     print u'pyreadline parse_and_bind error, unknown function to bind: "%s"' % func_name
                 return
             self.mode._bind_key(key, func)
     except:
         log(u'error')
         raise
Example #7
def import_problem(path, pid):
	with app.app_context():
		existing_problem = Problems.query.filter_by(pid=pid).first()
		if existing_problem is not None:
			db.session.delete(existing_problem)
			db.session.commit()
		metadata = yaml.load(open(os.path.join(path, "problem.yml")))
		title = metadata.get("title")
		category = metadata.get("category")
		value = int(metadata.get("value"))
		hint = metadata.get("hint")
		description = open(os.path.join(path, "description.md")).read()
		grader = open(os.path.join(path, "grader.py")).read()

		if "files" in metadata:
			files = metadata["files"]
			files_dir = os.path.join(app.config["UPLOAD_FOLDER"], pid)
			if os.path.exists(files_dir):
				shutil.rmtree(files_dir)
			os.mkdir(files_dir)
			for file in files:
				src = os.path.join(path, file)
				if os.path.exists(src):
					shutil.copyfile(src, os.path.join(files_dir, file))

		try:
			problem.add_problem(title, category, description, value, grader, pid=pid, hint=hint)
		except Exception, e:
			logger.log(__name__, "Error when importing problem '%s': %s" % (pid, str(e)))
Example #8
 def send(self, node, msg, item):
     logger.log(logger.Debug, 'node ', self.me.consensus.cert, ' send to ', node, msg, item)
     # our fake network calls a node's network.receive() directly
     # retry if returns None?
     # how distinguish network failure and a replica saying f**k off
     logger.log(logger.Debug, 'crashed', self.config[node].crashed)
     return Future(False, self.config[node].receive, msg, item)
Example #9
 def readline(self, prompt=u''):
     self.readline_setup(prompt)
     self.ctrl_c_timeout = time.time()
     self._readline_from_keyboard()
     self.console.write(u'\r\n')
     log(u'returning(%s)' % self.get_line_buffer())
     return self.get_line_buffer() + u'\n'
Example #10
def rel_xy(feed, feed_url, feed_proxy, feed_lgn_name, feed_lgn_pw, feed_x, feed_y, step_x, step_y):
    """
    Set the PT(Z) relative to the last position
    
    args    : feed, 
              feed_url,   
              feed_proxy, 
              feed_lgn_name, 
              feed_lgn_pw, 
              feed_x, 
              feed_y
    excepts : 
    return  : 
    """

    current_x, current_y, step_x, step_y = load_xy_step_xy(feed)
    
    # grab 'feed_x', 'feed_y' values as step values, a workaround
    if feed_x != 0:
        step_x = abs(feed_x)
        
    elif feed_y != 0:
        step_y = abs(feed_y)
        
    new_x = current_x + feed_x
    new_y = current_y + feed_y
    move_rel_xy(feed, current_x, current_y, new_x, new_y, step_x, step_y, feed_url, feed_proxy, feed_lgn_name, feed_lgn_pw)
    logger.log('rel_xy() - feed:%s, x:%s, y:%s' % (feed, new_x, new_y), 'DEBUG')
    save_xy_step_xy(feed, new_x, new_y, step_x, step_y)
Example #11
def set_up(networkLibvirt, connLibvirt, networkElem, ipv6addr, prefix):
    newXml = networkElem.toxml()
    #logger.log(networkElem.toxml())
    #ret = dir(conn)
    #for method in ret:
    #    logger.log(repr(method))
    networkLibvirt.undefine()
    networkLibvirt.destroy()
    connLibvirt.networkCreateXML(newXml)
    networkDefault = connLibvirt.networkDefineXML(newXml)
    set_autostart(networkDefault)
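    # Enable IPv6 forwarding on the host before advertising the prefix via radvd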
    commandForwarding = ['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']
    logger.log_call(commandForwarding, timeout=15*60)
    configRadvd = """
interface virbr0
{
        AdvSendAdvert on;
        MinRtrAdvInterval 30;
        MaxRtrAdvInterval 100;
        prefix %(ipv6addr)s/%(prefix)s
        {
                AdvOnLink on;
                AdvAutonomous on;
                AdvRouterAddr off;
        };

};
""" % locals()
    with open(radvd_conf_file, 'w') as f:
        f.write(configRadvd)
    kill_radvd()
    start_radvd()
    logger.log("ipv6: set up process finalized -- enabled IPv6 address to the slivers!")
Example #12
 def this_closure ():
     import time
     logger.log("TIMER trigering at %s (was armed at %s, expected to trigger at %s)"%\
                    (reservation.time_printable(time.time()),
                     reservation.time_printable(now),
                     reservation.time_printable(timestamp)))
     self.granularity_callback (now)
Example #13
def touch_url(feed_url, cgi_url, feed_proxy, feed_lgn_name, feed_lgn_pw):
    """
    Touch the URL created by merging 'feed_url, and 'cgi_url' 

    args    : feed_url,
              cgi_url, 
              feed_proxy,
              feed_lgn_name,
              feed_lgn_pw  
    excepts : 
    return  :
    """
     
    base_url = feed_url.split('/SnapshotJPEG')[0]
    
    # add user name and password if supplied
    url_prot, url_body = base_url[:7], base_url[7:]
    if  feed_lgn_name != '' and feed_lgn_pw != '':
        url_prot += '%s:%s@' % (feed_lgn_name, feed_lgn_pw)
    base_url = '%s%s' % (url_prot, url_body)
    
    logger.log('touch_url() - %s%s' % (base_url, cgi_url), 'DEBUG')
    f_obj = urllib.urlopen('%s%s' % (base_url, cgi_url))
    time.sleep(0.2)
    f_obj.close()
Example #14
    def configure(self, rec):
        """Write <rec['keys']> to my authorized_keys file."""
        logger.verbose('account: configuring %s'%self.name)
        new_keys = rec['keys']
        if new_keys != self.keys:
            # get the unix account info
            gid = grp.getgrnam("slices")[2]
            pw_info = pwd.getpwnam(self.name)
            uid = pw_info[2]
            pw_dir = pw_info[5]

            # write out authorized_keys file and conditionally create
            # the .ssh subdir if need be.
            dot_ssh = os.path.join(pw_dir,'.ssh')
            if not os.path.isdir(dot_ssh):
                if not os.path.isdir(pw_dir):
                    logger.verbose('account: WARNING: homedir %s does not exist for %s!'%(pw_dir,self.name))
                    os.mkdir(pw_dir)
                    os.chown(pw_dir, uid, gid)
                os.mkdir(dot_ssh)

            auth_keys = os.path.join(dot_ssh,'authorized_keys')
            tools.write_file(auth_keys, lambda f: f.write(new_keys))

            # set access permissions and ownership properly
            os.chmod(dot_ssh, 0700)
            os.chown(dot_ssh, uid, gid)
            os.chmod(auth_keys, 0600)
            os.chown(auth_keys, uid, gid)

            # set self.keys to new_keys only when all of the above ops succeed
            self.keys = new_keys

            logger.log('account: %s: installed ssh keys' % self.name)
Example #15
def start():
    """The database dumper daemon.
When it starts up, it populates the database with the last dumped database.
It proceeds to handle dump requests forever."""
    def run():
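        # Wait (under the lock) until a dump is requested, snapshot the database
        # with cPickle, then write it out and log it; repeat forever.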
        global dump_requested
        while True:
            db_lock.acquire()
            while not dump_requested: db_cond.wait()
            db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL)
            dump_requested = False
            db_lock.release()
            try:
                tools.write_file(DB_FILE, lambda f: f.write(db_pickle))
                logger.log_database(db)
            except:
                logger.log_exc("database.start: failed to pickle/dump")
    global db
    try:
        f = open(DB_FILE)
        try: db = cPickle.load(f)
        finally: f.close()
    except IOError:
        logger.log ("database: Could not load %s -- starting from a fresh database"%DB_FILE)
        db = Database()
    except:
        logger.log_exc("database: failed in start")
        db = Database()
    logger.log('database.start')
    tools.as_daemon_thread(run)
Example #16
def restartService():
    if not os.path.exists("/etc/init.d/codemux"): return
    logger.log("codemux:  Restarting codemux service")
    if isRunning():
        logger.log_call(["/etc/init.d/codemux","condrestart", ])
    else:
        logger.log_call(["/etc/init.d/codemux","restart", ])
Example #17
File: admin.py Project: EasyCTF/OpenCTF
def import_repository(path, problems):
	logger.log(__name__, "Importing %s" % str(problems))
	for problem in problems:
		problem_path = os.path.join(path, problem)
		if os.path.isdir(problem_path):
			import_problem(problem_path, problem)
	shutil.rmtree(path)
Example #18
 def scan_block(self, blockhash):
     if self.metastore.did_scan(self.color_id, blockhash):
         return
     log("scan block %s", blockhash)
     for tx in self.blockchain_state.iter_block_txs(blockhash):
         self.scan_tx(tx)
     self.metastore.set_as_scanned(self.color_id, blockhash)
Example #19
 def GetSlivers(self, config, plc):
     """Retrieves GetSlivers at PLC and triggers callbacks defined in modules/plugins"""
     try:
         logger.log("nodemanager: Syncing w/ PLC")
         # retrieve GetSlivers from PLC
         data = plc.GetSlivers()
         # use the magic 'default' slice to retrieve system-wide defaults
         self.getPLCDefaults(data, config)
         # tweak the 'vref' attribute from GetSliceFamily
         self.setSliversVref (data)
         # dump it too, so it can be retrieved later in case of comm. failure
         self.dumpSlivers(data)
         # log it for debug purposes, no matter what verbose is
         logger.log_slivers(data)
         logger.verbose("nodemanager: Sync w/ PLC done")
         last_data=data
     except:
         logger.log_exc("nodemanager: failed in GetSlivers")
         #  XXX So some modules can at least bootstrap.
         logger.log("nodemanager:  Can't contact PLC to GetSlivers().  Continuing.")
         data = {}
         # for modules that request it though the 'persistent_data' property
         last_data=self.loadSlivers()
     #  Invoke GetSlivers() functions from the callback modules
     for module in self.loaded_modules:
         logger.verbose('nodemanager: triggering %s.GetSlivers'%module.__name__)
         try:
             callback = getattr(module, 'GetSlivers')
             module_data=data
             if getattr(module,'persistent_data',False):
                 module_data=last_data
             callback(data, config, plc)
         except:
             logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module)
Example #20
 def __init__(self, host, port):
     """
     Initializes the element by opening a socket to the android SMSServer
     Does not do any exception handling.
     
     host is the host on which the android SMS server is listening
     port is the port on which the android SMS server is listening
     """
     SMSPipelineElement.__init__(self, 'android connector', 'android device')
     
     self.port = port
     self.host  = host
     
     # Making the connection
     # We don't catch the error here, we
     # leave that as an exercise to the caller
     android_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     android_socket.connect((self.host, self.port))      
     logger.log(self, "connected to android device on host %s port %d" % (host, port))
     
     # Obtain mock file objects for the socket
     # And the TextParser to parse the text message
     self.out_ = android_socket.makefile(mode = 'w')
     self.in_ = android_socket.makefile()
     self.socket_ = android_socket
     self.text_parser = TextParser(self.in_)
     
     # Used to avoid double closing
     self.closed = False       
Example #21
 def parse_and_bind(self, string):
     """Parse and execute single line of a readline init file."""
     try:
         log('parse_and_bind("%s")' % string)
         if string.startswith("#"):
             return
         if string.startswith("set"):
             m = re.compile(r"set\s+([-a-zA-Z0-9]+)\s+(.+)\s*$").match(string)
             if m:
                 var_name = m.group(1)
                 val = m.group(2)
                 try:
                     setattr(self, var_name.replace("-", "_"), val)
                 except AttributeError:
                     log('unknown var="%s" val="%s"' % (var_name, val))
             else:
                 log('bad set "%s"' % string)
             return
         m = re.compile(r"\s*(.+)\s*:\s*([-a-zA-Z]+)\s*$").match(string)
         if m:
             key = m.group(1)
             func_name = m.group(2)
             py_name = func_name.replace("-", "_")
             try:
                 func = getattr(self.mode, py_name)
             except AttributeError:
                 log('unknown func key="%s" func="%s"' % (key, func_name))
                 if self.debug:
                     print 'pyreadline parse_and_bind error, unknown function to bind: "%s"' % func_name
                 return
             self.mode._bind_key(key, func)
     except:
         log("error")
         raise
Example #22
    def __init__(self, user_data_filename):
        """
        Initializes a new instance of the RedditClient class.
        :param user_data_filename: The file that should be created or read in with user data.
        """
        # The Reddit API instance
        self.api = praw.Reddit(user_agent="windows:reddit_play_thing:v1.0.0")

        # The hard coded application information we registered with Reddit
        self.api.set_oauth_app_info("NvFC9EM7Z1jB4Q", "", "http://127.0.0.1:65010/authorize_callback")

        # The number of times we can fail to make a call before stopping a loop
        self.FAILURE_LIMIT = 10

        # The file to save the user data in that needs to be persisted between runs
        self.user_data_filename = user_data_filename

        # A dictionary from the Reddit API containing the following:
        # "scope": Our access permissions per https://github.com/reddit/reddit/wiki/OAuth2
        # "access_token": The access token the user provided us when they gave us permissions to use their username
        # "refresh_token": The refresh token used to refresh our permissions to the user
        self.access_information = None

        try:
            if self.user_data_filename is not None and os.path.isfile(self.user_data_filename):
                with open(self.user_data_filename, "rb") as file:
                    self.access_information = pickle.load(file)
        except Exception as e:
            logger.log("RedditClient.__init__: Error loading user data and refreshing:", e)

        # Try to login if we had information saved
        if self.access_information is not None:
            self.login(self.access_information["refresh_token"])
Example #23
File: doc.py Project: alberthier/bhware
    def on_start(self, packet):
        self.event_loop.is_match_started = True
        self.yield_at(90000, EndOfMatch())
        logger.log("Starting ...")

        self.fsm.interbot_fsm.current_state.set_teammate_collision_detection(False)
        self.event_loop.map.use_interbot_position = False

        yield Trigger(TORCH_GUIDE_CLOSE)

#        yield TimerWaitTeamMateToLeave(1000, dy = 0.5)
        yield Timer(1000)
        yield MoveLineTo(self.start_x, RED_START_Y)

        # huntgoal = self.robot.goal_manager.get_goals("HuntTheMammoth")[0]
        # yield Navigate(huntgoal.x, huntgoal.y)
        # yield HuntTheMammoth()
        # self.robot.goal_manager.update_goal_status(huntgoal, GOAL_DONE)
        # torchgoal = self.robot.goal_manager.get_goals("TakeTorch_Mine")[0]
        # yield Navigate(torchgoal.x, torchgoal.y)

        # self.fsm.interbot_fsm.current_state.set_teammate_collision_detection(True)

        # yield TakeTorch(None, True)
        # torchgoal = self.robot.goal_manager.update_goal_status(torchgoal, GOAL_DONE)

        while True :
            yield ExecuteGoals()
            yield Timer(1000)
Example #24
def main():
    monitor = PSysMonitor()
    try:
        monitor.start()
    except:
        logger.log("ERROR", sys.exc_info()[0])
        raise
Example #25
def api_post_request(api_url, extra_payload={}, extra_headers={}):
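    # Reuse the cached access token if one exists, otherwise fetch a new one and persist it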
    token = settings.read('api', 'access_token')
    if not token:
        token = get_token()
        settings.write('api', 'access_token', token)

    headers = {'Access-Token': token,
               'Content-Type': 'application/json'}

    payload = {}
    if extra_headers:
        headers.update(extra_headers)
    if extra_payload:
        payload.update(extra_payload)

    r = requests.post(api_url, data=json.dumps(payload), headers=headers)
    if r.status_code != 200:
        log_msg = 'api post error: {0}\n{1}\napi url: {2}\npayload: {3}'.format(r.status_code,
                                                                                r.text,
                                                                                api_url,
                                                                                payload)
        log(log_msg)
        raise PbApiException()
    else:
        return r.json()
Example #26
File: marty.py Project: alberthier/bhware
    def on_enter(self):
        if self.robot.team == TEAM_YELLOW:
            direction = DIRECTION_BACKWARDS
            angle = -math.pi / 2.0
        else:
            direction = DIRECTION_FORWARD
            angle = math.pi / 2.0

        x = self.robot.pose.x
        y = MAMMOTH_HUNT_Y_FINAL

        if self.robot.team == TEAM_RED:
            move = yield LookAt(x, y, direction = direction)
        else :
            move = yield LookAtOpposite(x, y, direction = direction)
        move = yield MoveLineTo(x, y, direction = direction, chained = move)

        # yield CalibrateAxis("y", 0.3, math.pi/2)

        move = yield RotateTo(angle, chained = move)

        if move.exit_reason != TRAJECTORY_DESTINATION_REACHED :
            logger.log("We missed the mammoth, leave")
            yield None


        yield KillMammoth()


        yield RotateTo(0.0)

        yield MoveLineRelative(0.1)

        self.exit_reason = GOAL_DONE
        yield None
Example #27
File: vsys.py Project: fdawg4l/NodeManager
def GetSlivers(data, config=None, plc=None):
    """For each sliver with the vsys attribute, set the script ACL, create the vsys directory in the slice, and restart vsys."""

    if 'slivers' not in data:
        logger.log_missing_data("vsys.GetSlivers",'slivers')
        return

    # Touch ACLs and create dict of available
    scripts = {}
    for script in touchAcls(): scripts[script] = []
    # slices that need to be written to the conf
    slices = []
    _restart = False
    # Parse attributes and update dict of scripts
    if 'slivers' not in data:
        logger.log_missing_data("vsys.GetSlivers",'slivers')
        return
    for sliver in data['slivers']:
        for attribute in sliver['attributes']:
            if attribute['tagname'] == 'vsys':
                if sliver['name'] not in slices:
                    # add to conf
                    slices.append(sliver['name'])
                    _restart = createVsysDir(sliver['name']) or _restart
                if attribute['value'] in scripts.keys():
                    scripts[attribute['value']].append(sliver['name'])

    # Write the conf
    _restart = writeConf(slices, parseConf()) or _restart
    # Write out the ACLs
    if writeAcls(scripts, parseAcls()) or _restart:
        logger.log("vsys: restarting vsys service")
        logger.log_call(["/etc/init.d/vsys", "restart", ])
Example #28
 def found_terminator(self):
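     # Dispatch on parser state: after the header, either read the POST body or finish,
     # then hand the request off to the WSGI handler once READING_DONE is reached.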
     if self.state == WsgiRequestHandler.READING_HTTP_HEADER:
         self.parse_header()
         if self.environ["REQUEST_METHOD"] == "POST":
             if "CONTENT_LENGTH" in self.environ:
                 content_length = int(self.environ["CONTENT_LENGTH"])
                 self.set_terminator(content_length)
             else:
                 self.set_terminator(b"\0")
             self.state = WsgiRequestHandler.READING_HTTP_POST_DATA
         else:
             self.set_terminator(None)
             self.state = WsgiRequestHandler.READING_DONE
         self.ibuffer = bytes()
     elif self.state == WsgiRequestHandler.READING_HTTP_POST_DATA:
         self.set_terminator(None)
         self.post_data.write(self.ibuffer)
         self.post_data.seek(0)
         self.ibuffer = bytes()
         self.state = WsgiRequestHandler.READING_DONE
     if self.state == WsgiRequestHandler.READING_DONE:
         errors = io.StringIO()
         self.server.current_environ = self.environ.copy()
         handler = wsgiref.handlers.SimpleHandler(self.post_data, self, errors, self.environ, False, False)
         handler.server_software = "BHWebServer/2012" + " Python/" + sys.version.split()[0]
         handler.run(self.server.get_app())
         if len(errors.getvalue()) != 0:
             logger.log(errors.getvalue())
Example #29
    def solve_case(self):
        logger.log("A:{A}, B:{B}".format(A=self._A, B=self._B), i_force=True, o_log_file=self._debug_file)

        num_recycled = 0
        recycled = set()
        for i in range(self._A, self._B + 1):
            if i < 10:
                # Single digit numbers fail automatically
                continue
            si = str(i)

            # Try every set of rotated digits
            for j in range(1,len(si)):
                new_num = int(si[j:] + si[:j])
                if new_num == i:
                    # Equal digits don't count
                    continue
                if (new_num >= self._A) and (new_num <= self._B):
                    # The rotated number is within the bounds
                    if ((i,new_num) not in recycled) and ((new_num,i) not in recycled):
                        # Only count each unordered (i, new_num) pair once
                        recycled.add((i,new_num))
                        #print i,new_num
                        num_recycled += 1

        #print recycled
        return num_recycled
Example #30
File: bitcoin_rpc.py Project: sumory/BTC
 def prevhash(self):
     resp = self._call('getwork', [])
     try:
         return json.loads(resp)['result']['data'][8:72]
     except Exception as e:
         logger.log('error', "Cannot decode prevhash %s" % str(e))
         raise
Example #31
            " (already installed)")
        return
    # Copy the plugin
    copytree(plugin_dir, "/data/plugins/" + metadata["id"])
    # Add u+x permissions to jobs files
    for job_file in glob(plugin_dir + "jobs/*"):
        st = stat(job_file)
        chmod(job_file, st.st_mode | S_IEXEC)


try:

    # Check if we have plugins to download
    plugin_urls = getenv("EXTERNAL_PLUGIN_URLS", "")
    if plugin_urls == "":
        log("JOBS", "ℹ️", "No external plugins to download")
        _exit(0)

    # Loop on URLs
    for plugin_url in plugin_urls.split(" "):

        # Download ZIP file
        try:
            req = get(plugin_url)
        except:
            log(
                "JOBS", "❌", "Exception while downloading plugin(s) from " +
                plugin_url + " :")
            print(format_exc())
            status = 2
            continue
Example #32
import os

from config import conf
from logger import log
from cmdClient.cmdClient import cmdClient

from BotData import BotData
from Timer import TimerInterface

# Get the real location
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Load required data from configs
masters = [int(master.strip()) for master in conf['masters'].split(",")]
config = BotData(app="pomo", data_file="data/config_data.db")

# Initialise the client
client = cmdClient(prefix=conf['prefix'], owners=masters)
client.config = config

# Load the commands
client.load_dir(os.path.join(__location__, 'commands'))

# Initialise the timer
TimerInterface(client, conf['session_store'])

# Log and execute!
log("Initial setup complete, logging in", context='SETUP')
client.run(conf['TOKEN'])
Example #33
try :

    # Check if at least a server has Blacklist activated
    blacklist_activated = False
    # Multisite case
    if os.getenv("MULTISITE") == "yes" :
        for first_server in os.getenv("SERVER_NAME").split(" ") :
            if os.getenv(first_server + "_USE_BLACKLIST", os.getenv("USE_BLACKLIST")) == "yes" :
                blacklist_activated = True
                break
    # Singlesite case
    elif os.getenv("USE_BLACKLIST") == "yes" :
        blacklist_activated = True
    if not blacklist_activated :
        logger.log("BLACKLIST", "ℹ️", "Blacklist is not activated, skipping downloads...")
        os._exit(0)

    # Create directories if they don't exist
    os.makedirs("/opt/bunkerweb/cache/blacklist", exist_ok=True)
    os.makedirs("/opt/bunkerweb/tmp/blacklist", exist_ok=True)

    # Our urls data
    urls = {
        "IP": [],
        "RDNS": [],
        "ASN" : [],
        "USER_AGENT": [],
        "URI": []
    }
Example #34
 def __exit__(self, exc_type, exc_val, exc_tb):
     # noinspection PyTypeChecker
     self.duration=(datetime.datetime.now()-self.start_time).total_seconds()
     if self.name:
         logger.log("{} duration: {} s".format(self.name, self.duration))
     return False
Example #35
def simple_timer(name):
    start_time = datetime.datetime.now()
    yield
    delta = datetime.datetime.now() - start_time
    logger.log("{} duration: {} s".format(name, delta.total_seconds()))
    return False
Example #36
def perform(user_id, data):
    if len(data) > 33 * 1024:
        return ERROR_INVALID_FORMAT
    if len(data) < 31 * 1024:
        return ERROR_INVALID_FORMAT
    if data[0x120b] != 0xe6 or data[0x4000] != 0xc3:
        return ERROR_INVALID_FORMAT
    if data[0x4007] != 0x01:
        return ERROR_NO_DATA_FOUND
    session = util.get_compiler_session(user_id)
    old_data = session['current_save']
    change_fun = False
    change_blessing = False
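    # Decode the password block of the uploaded save and normalise its event flags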
    try:
        data = data[0x6100:0x6100 + PASSWORD_LENGTH]
        with decoder_lock:
            new_data = generator.savdecoder.parse(data,
                                                  session['current_kingdom'])
        if new_data['special']:
            new_data_tmp = copy.deepcopy(old_data)
            new_data_tmp['save_id'] = new_data['save_id']
            new_data = new_data_tmp
        new_data['events'][event_flags.EVENT_K1_PRE_COMPLETE] = True
        if new_data['events'][event_flags.EVENT_K4_FUN_VALUE_CHANGED]:
            change_fun = True
            new_data['events'][event_flags.EVENT_K4_FUN_VALUE_CHANGED] = False
        if new_data['events'][
                event_flags.
                EVENT_K3_LAYLAH_BLESSING] and not session['laylah_blessing']:
            change_blessing = True
    except:
        logger.log(
            TAG,
            "exception occured while decoding save data for uid %i" % user_id)
        logger.log_exc(TAG)
        return ERROR_INVALID_DATA
    valid_id = util.get_save_id(session['user'], session['current_kingdom'],
                                session['visit_started'])
    if new_data['save_id'] != valid_id:
        logger.log(TAG,
                   "decoding save data for uid %i: wrong save id" % user_id)
        return ERROR_UNKNOWN_SAVE_ID
    rules = configparser.ConfigParser()
    rules.read(SAV_DIRECTORY + "/template/maps/" + session['current_kingdom'] +
               "/meta.txt")
    verify = generator.anticheat.verify(session, old_data, new_data, rules)
    if not verify:
        logger.log(
            TAG,
            "decoding save data for uid %i, kingdom %s: anticheat verification failed"
            % (user_id, session['current_kingdom']))
        return ERROR_ANTICHEAT_REJECTED
    if session['current_kingdom_undecorated'] not in session[
            'visited_kingdoms']:
        session['visited_kingdoms'].append(
            session['current_kingdom_undecorated'])
    storage.sql(
        """
        UPDATE progress SET
           cur_kingdom = 'none',
           save_blob = ?,
           save_uid = '',
           visited_kingdoms = ?
        WHERE user_id = ?
    """, (json.dumps(new_data), json.dumps(
            session['visited_kingdoms']), user_id))
    if change_fun:
        storage.sql(
            """
            UPDATE users SET fun = ?
            WHERE id = ?
        """, (random.randrange(0, 256), user_id))
    if change_blessing:
        storage.sql(
            """
            UPDATE progress SET laylah_blessing = 1
            WHERE user_id = ?
        """, (user_id, ))
    achievements.update(user_id, session, new_data)
    return (0, session['current_kingdom'])
Example #37
 for variable, value in os.environ.items():
     if not variable.startswith("CLUSTER_INSTANCE_"):
         continue
     endpoint = value.split(" ")[0]
     host = value.split(" ")[1]
     api = API(endpoint, host=host)
     sent, err, status, resp = api.request("POST",
                                           "/lets-encrypt/challenge",
                                           data={
                                               "token": token,
                                               "validation": validation
                                           })
     if not sent:
         status = 1
         log(
             "LETS-ENCRYPT", "❌", "Can't send API request to " +
             api.get_endpoint() + "/lets-encrypt/challenge : " + err)
     else:
         if status != 200:
             status = 1
             log(
                 "LETS-ENCRYPT", "❌",
                 "Error while sending API request to " +
                 api.get_endpoint() +
                 "/lets-encrypt/challenge : status = " +
                 resp["status"] + ", msg = " + resp["msg"])
         else:
             log(
                 "LETS-ENCRYPT", "ℹ️",
                 "Successfully sent API request to " +
                 api.get_endpoint() + "/lets-encrypt/challenge")
Example #38
 def get_all_employees():
     log(f"Indexing all employees")
     return jsonify(EmployeeService.all_employees()), 200
Example #39
def main():

    logger.configure('{}{}_logs'.format(filePath, envName))
    for k, v in C.items():
        logger.record_tabular(k, v)
    logger.dump_tabular()

    logger.log(
        'Storing weights after 1000000 steps. Practice DQN with Dense 512. 3 layer transfer. Epsilon 1.0 to 0.1 6e5.'
    )

    sess = tf.InteractiveSession()

    with open(weightsPath, "rb") as wt:
        weights = pickle.load(wt)

    wt_cnn = weights[0]
    train_env = make_env(C['env_id'], C['noop_max'])
    eval_env = make_env(C['env_id'], C['noop_max'])
    train_s = train_env.reset()
    agent = Agent(train_env, C, wt_cnn)

    train_reward = tf.placeholder(tf.float32)
    eval_reward = tf.placeholder(tf.float32)
    train_summary = tf.summary.scalar('train_reward', train_reward)
    eval_summary = tf.summary.scalar('eval_reward', eval_reward)
    writer = tf.summary.FileWriter('{}{}_summary'.format(filePath, envName),
                                   sess.graph)

    sess.run(tf.global_variables_initializer())
    for it in range(C['pre_iterations']):
        train_a = agent.act_pre()
        ns, train_r, train_d, _ = train_env.step(train_a)
        agent.record(train_s, train_a, train_r, float(train_d), it, True)
        train_s = ns
        if train_d:
            train_s = train_env.reset()

    logger.log('Pre-training completed')

    agent.net.initialize_online_network()
    train_track = [0.0]
    eval_track = []
    best_reward = 0

    train_fs = reset_fs()
    train_s = train_env.reset()
    best_reward = 0
    train_mean = []
    eval_mean = []

    agent.net.update_target_network()

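    # Main training loop: interact with the environment, record transitions, and periodically evaluate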
    for it in range(C['iterations']):

        train_fs.append(train_s)

        train_a = agent.act(np.transpose(train_fs, (1, 2, 0)))
        ns, train_r, train_d, _ = train_env.step(train_a)
        #print('Iteration ',it, ' Reward ', train_r)
        train_track[-1] += train_r
        agent.record(train_s, train_a, train_r, float(train_d), it, False)
        train_s = ns

        if train_d:
            if train_env.env.env.was_real_done:
                if len(train_track) % 100 == 0:
                    train_mean.append(np.mean(train_track[-100:]))
                    summary = sess.run(
                        train_summary,
                        feed_dict={train_reward: np.mean(train_track[-100:])})
                    writer.add_summary(summary, it)
                    logger.record_tabular('steps', it)
                    logger.record_tabular('episode', len(train_track))
                    logger.record_tabular('epsilon', 100 * agent.epsilon)
                    logger.record_tabular('learning rate', agent.lr)
                    logger.record_tabular('Mean Reward 100 episodes',
                                          np.mean(train_track[-100:]))
                    logger.dump_tabular()
                    with open(resultPath + 'reward_atari_practice.pk1',
                              'wb') as f:
                        pickle.dump(train_track,
                                    f,
                                    protocol=pickle.HIGHEST_PROTOCOL)
                train_track.append(0.0)

            train_fs = reset_fs()
            train_s = train_env.reset()

        if (it + 1) % C['eval_freq'] == 0:

            for i in range(C['eval_episodes']):
                temp_video = []
                eval_track.append(0.0)
                eval_fs = reset_fs()
                eval_s = eval_env.reset()
                while True:
                    temp_video.append(eval_s)
                    eval_fs.append(eval_s)
                    eval_a = agent.greedy_act(np.transpose(eval_fs, (1, 2, 0)))
                    eval_s, eval_r, eval_d, _ = eval_env.step(eval_a)
                    eval_track[-1] += eval_r
                    if eval_env.env.env.was_real_done:
                        break
                    if eval_d:
                        eval_fs = reset_fs()
                        eval_s = eval_env.reset()

                if eval_track[-1] > best_reward:
                    best_reward = eval_track[-1]
                    best_video = temp_video
                    with open(resultPath + 'video_atari_practice.pk1',
                              'wb') as f:
                        pickle.dump(best_video,
                                    f,
                                    protocol=pickle.HIGHEST_PROTOCOL)

            eval_mean.append(np.mean(eval_track[-C['eval_episodes']:]))
            logger.log(
                'Evaluate mean reward: {:.2f}, max reward: {:.2f}, std: {:.2f}'
                .format(np.mean(eval_track[-C['eval_episodes']:]),
                        np.max(eval_track[-C['eval_episodes']:]),
                        np.std(eval_track[-C['eval_episodes']:])))
            summary = sess.run(eval_summary,
                               feed_dict={
                                   eval_reward:
                                   np.mean(eval_track[-C['eval_episodes']:])
                               })
            writer.add_summary(summary, it)
            with open(resultPath + 'eval_reward_atari_practice.pk1',
                      'wb') as f:
                pickle.dump(eval_track, f, protocol=pickle.HIGHEST_PROTOCOL)
        """if it%1000000 == 0:
      outputs = agent.net.get_outputs(np.transpose(train_fs, (1,2,0)))
      with open(resultPath+str(it)+'outputs.pk1', 'wb') as f:
        pickle.dump(outputs, f, protocol=pickle.HIGHEST_PROTOCOL)
      with open(resultPath+str(it)+'outputs_screen.pk1', 'wb') as f:
        pickle.dump(train_fs, f, protocol=pickle.HIGHEST_PROTOCOL)"""

        if it % 1000000 == 0:
            weights = agent.net.get_weights()
            with open(resultPath + str(it) + '_weights.pk1', 'wb') as f:
                pickle.dump(weights, f, protocol=pickle.HIGHEST_PROTOCOL)

    with open(resultPath + 'reward_atari_practice.pk1', 'wb') as f:
        pickle.dump(train_track, f, protocol=pickle.HIGHEST_PROTOCOL)
    with open(resultPath + 'trainMean_atari_practice.pk1', 'wb') as f:
        pickle.dump(train_mean, f, protocol=pickle.HIGHEST_PROTOCOL)
    with open(resultPath + 'evalMean_atari_practice.pk1', 'wb') as f:
        pickle.dump(eval_mean, f, protocol=pickle.HIGHEST_PROTOCOL)
    agent.net.save(filePath + '{}_model2'.format(C['env_id']))
    sess.close()
Example #40
import sys
from loguru import logger
from logger import log

logger.debug("Log1")
logger.error("Log2")

log(1)
log(1, 2)
log(1, 2, 3, error=True)

# @logger.catch
# def f1():
#   1/0
# f1()
Example #41
def log(message):
    logger.log(message, 'master')
Example #42
 def process_events(self, current_env):
     old_env = current_env
     # TODO : check why filter isn't working as expected
     #for event in self.__client.events(decode=True, filters={"type": "service", "label": ["bunkerized-nginx.AUTOCONF", "bunkerized-nginx.SERVER_NAME"]}) :
     for event in self.__client.events(decode=True,
                                       filters={"type": "service"}):
         new_env = self.get_env()
         if new_env != old_env:
             self.lock.acquire()
             try:
                 log("controller", "INFO", "generating new configuration")
                 if self.gen_conf(new_env):
                     old_env = new_env.copy()
                     log("controller", "INFO",
                         "successfully generated new configuration")
                     if self.reload():
                         log("controller", "INFO", "successful reload")
                     else:
                         log("controller", "ERROR", "failed reload")
                 else:
                     log("controller", "ERROR",
                         "can't generate new configuration")
             except:
                 log("controller", "ERROR",
                     "exception while receiving event")
             self.lock.release()
Example #43
 def scan_block(self, height):
     log("scanning block at height %s" % height)
     for tx in self.blockchain_state.iter_block_txs(height):
         self.scan_tx(tx)
     self.cur_height = height
     self.metastore.set_scan_height(self.color_id, self.cur_height)
Example #44
File: main.py Project: exqlnet/wjx-api
"""
Only the Wenjuanxing (问卷星) account credentials and the survey's activity id are required.
Clears the submitted answers at 20:00 every day, opens the survey at 21:00, and keeps it closed at other times.
If the data has not been downloaded yet, it is downloaded automatically and saved to the data directory.
"""
activity = "42794295"

from wjx import Wjx
import time
from datetime import datetime
import random
import os
from logger import log

wjx = Wjx("wjx_username", "wjx_password")
stat = wjx.is_running(activity)
log("该问卷运行状态:", "正在运行" if stat else "未运行")

# Create the data directory first if it does not exist
if not os.path.exists("data"):
    os.mkdir("data")

while True:
    now = datetime.now()
    log("Checking...")
    filename = "data/" + now.strftime("%Y-%m-%d") + ".xls"
    if now.hour == 20:
        wjx.clear(activity)
    if now.hour >= 21:
        wjx.start(activity)
    else:
        wjx.stop(activity)
Example #45
import multiprocessing as mp
import time, datetime
from logger import log
from worker import worker

if __name__ == '__main__':
    start = datetime.datetime.now()
    processes = []
    for i in range(200):
        p = mp.Process(target=worker, args=(i, ))
        processes.append(p)
        p.start()
    for p in processes:
        p.join()

    log("Took " + str(datetime.datetime.now() - start))
    log("pls")
Example #46
def start():
    logger.log("net: plugin starting up...")
Example #47
def run(account_config, memcache_config, rabbitmq_config, filter_config):
    logger.log('Initializing stream listener')
    listener = CustomStreamListener(account_config, memcache_config,
                                    rabbitmq_config)
    listener.start(filter_config)
Example #48
def _process_work_items(work_queue):
    '''
    This runs in the multiprocessing forks to do the actual work. It is a long-lived loop.
    '''
    while True:
        if terminate:
            logger.debug_log('got terminate; stopping work')
            break

        logger.debug_log('_process_work_items: dequeueing work item')
        # This blocks if the queue is empty
        email_diagnostic_info = work_queue.get()
        logger.debug_log('_process_work_items: dequeued work item')

        logger.debug_log('feedback object id: %s' %
                         email_diagnostic_info['diagnostic_info_record_id'])

        # Check if there is (yet) a corresponding diagnostic info record
        diagnostic_info = datastore.find_diagnostic_info(
            email_diagnostic_info['diagnostic_info_record_id'])
        if not diagnostic_info:
            logger.debug_log('diagnostic_info not found; skipping')
            continue

        logger.log('feedback id: %s' %
                   diagnostic_info.get('Metadata', {}).get('id'))

        diagnostic_info_text = pprint.pformat(diagnostic_info,
                                              indent=1,
                                              width=75)

        try:
            diagnostic_info_html = mailformatter.format(diagnostic_info)
        except Exception as e:
            logger.error('format failed: %s' % str(e))

            diagnostic_info_html = None

        # If we get to here, then we have a valid diagnostic email.
        # Reply with the decrypted content.

        # If this is not a reply, set a subject
        # If no subject is pre-determined, create one.
        if email_diagnostic_info.get('email_id') is None:
            subject = u'DiagnosticInfo: %s (%s)' % (
                diagnostic_info['Metadata'].get('platform',
                                                '[NO_PLATFORM]').capitalize(),
                diagnostic_info['Metadata'].get('id', '[NO_ID]'))
        else:
            subject = u'Re: %s' % (email_diagnostic_info['email_subject']
                                   or '')

        try:
            sender.send_response(
                config['decryptedEmailRecipient'],
                config['emailUsername'],
                subject,
                diagnostic_info_text,
                diagnostic_info_html,
                email_diagnostic_info.get('email_id'),  # may be None
                None)  # no attachment
            logger.log('decrypted formatted email sent')
        except Exception as e:
            logger.exception()
            logger.error(str(e))

        # Delete the processed record. (Note that sending the email might have
        # failed, but we're deleting it anyway. This is a debatable decision.)
        datastore.remove_email_diagnostic_info(email_diagnostic_info)
Example #49
 def on_error(self, status_code):
     logger.log(' '.join(
         ['Encountered error with status code:',
          str(status_code)]))
     return True
Example #50
import os

from DockerController import DockerController
from SwarmController import SwarmController
from IngressController import IngressController

from logger import log

# Get variables
swarm		= os.getenv("SWARM_MODE", "no") == "yes"
kubernetes	= os.getenv("KUBERNETES_MODE", "no") == "yes"
api_uri		= os.getenv("API_URI", "")
docker_host	= os.getenv("DOCKER_HOST", "unix:///var/run/docker.sock")
http_port	= os.getenv("HTTP_PORT", "8080")

# Instantiate the controller
if swarm :
	log("autoconf", "INFO", "swarm mode detected")
	controller = SwarmController(docker_host, api_uri, http_port)
elif kubernetes :
	log("autoconf", "INFO", "kubernetes mode detected")
	controller = IngressController(api_uri, http_port)
else :
	log("autoconf", "INFO", "docker mode detected")
	controller = DockerController(docker_host)

# Run the reload server in background if needed
if swarm or kubernetes :
	log("autoconf", "INFO", "start reload server in background")
	(server, thread) = run_reload_server(controller)

# Wait for instances
log("autoconf", "INFO", "wait until a bunkerized-nginx instance is started ...")
Example #51
def _do_exit(signum, frame):
    logger.log('Shutting down')
    sys.exit(0)
Example #52
 def on_timeout(self):
     logger.log('Timeout...')
     return True
Example #53
def help(bot, update):
    update.message.reply_text(HELP_MESSAGE)
    logger.log(update.message.from_user, update.message.text, HELP_MESSAGE)
Example #54
def learn(env,
          policy_func,
          reward_giver,
          reward_guidance,
          expert_dataset,
          rank,
          pretrained,
          pretrained_weight,
          *,
          g_step,
          d_step,
          entcoeff,
          save_per_iter,
          ckpt_dir,
          log_dir,
          timesteps_per_batch,
          task_name,
          gamma,
          lam,
          algo,
          max_kl,
          cg_iters,
          cg_damping=1e-2,
          vf_stepsize=3e-4,
          d_stepsize=1e-4,
          vf_iters=3,
          max_timesteps=0,
          max_episodes=0,
          max_iters=0,
          loss_percent=0.0,
          callback=None):

    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    policy = build_policy(env, 'mlp', value_network='copy')

    ob = observation_placeholder(ob_space)
    with tf.variable_scope('pi'):
        pi = policy(observ_placeholder=ob)
    with tf.variable_scope('oldpi'):
        oldpi = policy(observ_placeholder=ob)

    atarg = tf.placeholder(
        dtype=tf.float32,
        shape=[None])  # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return

    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = tf.reduce_mean(kloldnew)
    meanent = tf.reduce_mean(ent)
    entbonus = entcoeff * meanent

    vferr = tf.reduce_mean(tf.square(pi.vf - ret))

    ratio = tf.exp(pi.pd.logp(ac) -
                   oldpi.pd.logp(ac))  # advantage * pnew / pold
    surrgain = tf.reduce_mean(ratio * atarg)

    optimgain = surrgain + entbonus
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

    dist = meankl

    all_var_list = get_trainable_variables('pi')
    # var_list = [v for v in all_var_list if v.name.startswith("pi/pol") or v.name.startswith("pi/logstd")]
    # vf_var_list = [v for v in all_var_list if v.name.startswith("pi/vff")]
    var_list = get_pi_trainable_variables("pi")
    vf_var_list = get_vf_trainable_variables("pi")
    # assert len(var_list) == len(vf_var_list) + 1
    d_adam = MpiAdam(reward_giver.get_trainable_variables())
    guidance_adam = MpiAdam(reward_guidance.get_trainable_variables())

    vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32,
                                  shape=[None],
                                  name="flat_tan")
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
        start += sz
    gvp = tf.add_n([
        tf.reduce_sum(g * tangent)
        for (g, tangent) in zipsame(klgrads, tangents)
    ])  # pylint: disable=E1111
    fvp = U.flatgrad(gvp, var_list)
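    # fvp is the Fisher-vector product: the gradient of (KL-gradient . tangent), consumed by the conjugate-gradient solver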

    assign_old_eq_new = U.function(
        [], [],
        updates=[
            tf.assign(oldv, newv)
            for (oldv,
                 newv) in zipsame(get_variables('oldpi'), get_variables('pi'))
        ])
    compute_losses = U.function([ob, ac, atarg], losses)
    compute_lossandgrad = U.function([ob, ac, atarg], losses +
                                     [U.flatgrad(optimgain, var_list)])
    compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
    compute_vflossandgrad = U.function([ob, ret],
                                       U.flatgrad(vferr, vf_var_list))

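    # Time a labelled block of work; only the rank-0 worker prints timings.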
    @contextmanager
    def timed(msg):
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(
                colorize("done in %.3f seconds" % (time.time() - tstart),
                         color='magenta'))
        else:
            yield

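    # Average an array across all MPI workers via Allreduce.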
    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    guidance_adam.sync()
    vfadam.sync()
    if rank == 0:
        print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi,
                                     env,
                                     reward_giver,
                                     reward_guidance,
                                     timesteps_per_batch,
                                     stochastic=True,
                                     algo=algo,
                                     loss_percent=loss_percent)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
    true_rewbuffer = deque(maxlen=40)

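    # Exactly one termination condition (iterations, timesteps, or episodes) must be set.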
    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1

    g_loss_stats = stats(loss_names)
    d_loss_stats = stats(reward_giver.loss_name)
    ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
    # if provide pretrained weight
    if pretrained_weight is not None:
        U.load_state(pretrained_weight, var_list=pi.get_variables())

    while True:
        if callback: callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break

        # Save model
        # if rank == 0 and iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
        #     fname = os.path.join(ckpt_dir, task_name)
        #     os.makedirs(os.path.dirname(fname), exist_ok=True)
        #     saver = tf.train.Saver()
        #     saver.save(tf.get_default_session(), fname)

        logger.log("********** Iteration %i ************" % iters_so_far)

        # global flag_render
        # if iters_so_far > 0 and iters_so_far % 10 ==0:
        #     flag_render = True
        # else:
        #     flag_render = False

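        # Damped Fisher-vector product consumed by the conjugate gradient solver below.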
        def fisher_vector_product(p):
            return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p

        # ------------------ Update G ------------------
        logger.log("Optimizing Policy...")
        for _ in range(g_step):
            with timed("sampling"):
                seg = seg_gen.__next__()
            print('rewards', seg['rew'])
            add_vtarg_and_adv(seg, gamma, lam)
            # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
            ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg[
                "tdlamret"]
            vpredbefore = seg["vpred"]  # predicted value function before update
            atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate

            if hasattr(pi, "ob_rms"):
                pi.ob_rms.update(ob)  # update running mean/std for policy

            args = seg["ob"], seg["ac"], atarg
            fvpargs = [arr[::5] for arr in args]

            assign_old_eq_new()  # set old parameter values to new parameter values
            with timed("computegrad"):
                *lossbefore, g = compute_lossandgrad(*args)
            lossbefore = allmean(np.array(lossbefore))
            g = allmean(g)
            if np.allclose(g, 0):
                logger.log("Got zero gradient. not updating")
            else:
                with timed("cg"):
                    stepdir = cg(fisher_vector_product,
                                 g,
                                 cg_iters=cg_iters,
                                 verbose=rank == 0)
                assert np.isfinite(stepdir).all()
                shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                lm = np.sqrt(shs / max_kl)
                # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
                fullstep = stepdir / lm
                expectedimprove = g.dot(fullstep)
                surrbefore = lossbefore[0]
                stepsize = 1.0
                thbefore = get_flat()
                for _ in range(10):
                    thnew = thbefore + fullstep * stepsize
                    set_from_flat(thnew)
                    meanlosses = surr, kl, *_ = allmean(
                        np.array(compute_losses(*args)))
                    improve = surr - surrbefore
                    logger.log("Expected: %.3f Actual: %.3f" %
                               (expectedimprove, improve))
                    if not np.isfinite(meanlosses).all():
                        logger.log("Got non-finite value of losses -- bad!")
                    elif kl > max_kl * 1.5:
                        logger.log("violated KL constraint. shrinking step.")
                    elif improve < 0:
                        logger.log("surrogate didn't improve. shrinking step.")
                    else:
                        logger.log("Stepsize OK!")
                        break
                    stepsize *= .5
                else:
                    logger.log("couldn't compute a good step")
                    set_from_flat(thbefore)
                if nworkers > 1 and iters_so_far % 20 == 0:
                    paramsums = MPI.COMM_WORLD.allgather(
                        (thnew.sum(),
                         vfadam.getflat().sum()))  # list of tuples
                    assert all(
                        np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
            with timed("vf"):
                for _ in range(vf_iters):
                    for (mbob, mbret) in dataset.iterbatches(
                        (seg["ob"], seg["tdlamret"]),
                            include_final_partial_batch=False,
                            batch_size=128):
                        if hasattr(pi, "ob_rms"):
                            pi.ob_rms.update(
                                mbob)  # update running mean/std for policy
                        g = allmean(compute_vflossandgrad(mbob, mbret))
                        vfadam.update(g, vf_stepsize)

        g_losses = meanlosses
        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)
        logger.record_tabular("ev_tdlam_before",
                              explained_variance(vpredbefore, tdlamret))

        # ------------------ Update D ------------------
        logger.log("Optimizing Discriminator...")
        logger.log(fmt_row(13, reward_giver.loss_name))
        ob_expert, ac_expert = expert_dataset.get_next_batch(
            batch_size=len(ob))
        batch_size = 128
        d_losses = []  # list of tuples, each of which gives the loss for a minibatch
        with timed("Discriminator"):
            for (ob_batch, ac_batch) in dataset.iterbatches(
                (ob, ac),
                    include_final_partial_batch=False,
                    batch_size=batch_size):
                ob_expert, ac_expert = expert_dataset.get_next_batch(
                    batch_size=batch_size)
                # update running mean/std for reward_giver
                if hasattr(reward_giver, "obs_rms"):
                    reward_giver.obs_rms.update(
                        np.concatenate((ob_batch, ob_expert), 0))
                *newlosses, g = reward_giver.lossandgrad(ob_batch, ob_expert)
                d_adam.update(allmean(g), d_stepsize)
                d_losses.append(newlosses)
        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))

        # ------------------ Update Guidance ------------
        logger.log("Optimizing Guidance...")

        logger.log(fmt_row(13, reward_guidance.loss_name))
        batch_size = 128
        guidance_losses = []  # list of tuples, each of which gives the loss for a minibatch
        with timed("Guidance"):
            for ob_batch, ac_batch in dataset.iterbatches(
                (ob, ac),
                    include_final_partial_batch=False,
                    batch_size=batch_size):
                ob_expert, ac_expert = expert_dataset.get_next_batch(
                    batch_size=batch_size)

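                # Keep only the expert transitions whose process_expert score
                # clears the loss_percent threshold.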
                idx_condition = process_expert(ob_expert, ac_expert)
                pick_idx = (idx_condition >= loss_percent)
                # pick_idx = idx_condition

                ob_expert_p = ob_expert[pick_idx]
                ac_expert_p = ac_expert[pick_idx]

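                # Query the current policy for an action at each kept expert state.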
                ac_batch_p = []
                for each_ob in ob_expert_p:
                    tmp_ac, _, _, _ = pi.step(each_ob, stochastic=True)
                    ac_batch_p.append(tmp_ac)

                # update running mean/std for reward_giver
                if hasattr(reward_guidance, "obs_rms"):
                    reward_guidance.obs_rms.update(ob_expert_p)
                # reward_guidance.train(expert_s=ob_batch_p, agent_a=ac_batch_p, expert_a=ac_expert_p)
                *newlosses, g = reward_guidance.lossandgrad(
                    ob_expert_p, ac_batch_p, ac_expert_p)
                guidance_adam.update(allmean(g), d_stepsize)
                guidance_losses.append(newlosses)
        logger.log(fmt_row(13, np.mean(guidance_losses, axis=0)))

        lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])  # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
        true_rewbuffer.extend(true_rets)
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)

        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer))
        logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens) * g_step
        iters_so_far += 1

        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)

        if rank == 0:
            logger.dump_tabular()

    if ckpt_dir is not None:
        print('saving...')
        fname = os.path.join(ckpt_dir, task_name)
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        pi.save(fname)
        print('saved successfully; path:', fname)
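Example #55
0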
    def num_params(self):
        parameters = filter(lambda p: p.requires_grad, self.parameters())
        parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
        logger.log('Trainable Parameters: %.3f million' % parameters)
Example #56
0
def cancel(bot, update, user_data):
    user_data.clear()
    update.message.reply_text(CANCELED)
    logger.log(update.message.from_user, update.message.text, CANCELED)

    return ConversationHandler.END
Example #57
0
def icmp(quiet):

    try:
        poll = get_config('poll')
        hosts = get_config('host')
    except IOError:
        print(
            '\n\nAn existing poll and/or host configuration file could not be found at:\n'
            + os.getcwd() + '/config\n\n')
        sys.exit(1)
    except json.decoder.JSONDecodeError:
        print(
            '\n\nAn invalid host configuration file has been found. Now exiting...\n\n'
        )
        sys.exit(1)

    print(
        '\n\n' + '#####################\n' + '#                   #\n' +
        '#   Polling Hosts   #\n' + '#                   #\n' +
        '#####################\n\n' + 'All hosts will be polled every',
        poll[0], 'second(s).\n' + 'Hosts will be marked as offline after',
        poll[1],
        'unanswered polls.\n' + 'To exit at any time, press CTRL+C.\n\n')

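    # Consecutive failed-poll counter for each host.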
    index = [0] * len(hosts)

    while (True):
        for x in range(0, len(hosts)):
            if (os.name == 'nt'):
                r = subprocess.run(['ping', '-n', '1', hosts[x]],
                                   stdout=subprocess.DEVNULL)
            else:
                r = subprocess.run(['ping', '-c', '1', '-w', '3', hosts[x]],
                                   stdout=subprocess.DEVNULL)

            if (r.returncode != 0):
                index[x] += 1

                if (index[x] == 1):
                    if (quiet == False):
                        print('Camera', x + 1, ' @ ', hosts[x],
                              ' is offline!  Code: ', r.returncode)
                elif (index[x] == poll[1]):
                    if (quiet == False):
                        print('Camera', x + 1, ' @ ', hosts[x],
                              ' has been offline for', poll[1],
                              'polls! Email notification sent!  Code: ',
                              r.returncode)
                    msg = str('Camera ' + str(x + 1) + ' @ ' + str(hosts[x]) +
                              ' has been offline for ' + str(poll[1]) +
                              ' polls! Code: ' + str(r.returncode))
                    mail(msg)
                    log(msg)
                    #mysql_log(msg)

            elif (r.returncode == 0 and index[x] != 0):
                index[x] = 0
                if (quiet == False):
                    print('Camera', x + 1, ' @ ', hosts[x],
                          ' has come back online!')
        try:
            sleep(poll[0])
        except ValueError:
            print(
                '\n\nAn invalid poll configuration file has been found. Now exiting...\n\n'
            )
            sys.exit(1)
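Example #58
0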
    def do_train(self,
                 paths,
                 dataset,
                 optimiser,
                 epochs,
                 batch_size,
                 step,
                 lr=1e-4,
                 valid_index=[],
                 use_half=False):
        if use_half:
            import apex
            optimiser = apex.fp16_utils.FP16_Optimizer(optimiser,
                                                       dynamic_loss_scale=True)
        for p in optimiser.param_groups:
            p['lr'] = lr
        criterion = nn.NLLLoss().cuda()
        k = 0
        saved_k = 0

        for e in range(epochs):
            trn_loader = DataLoader(
                dataset,
                collate_fn=lambda batch: env.collate(0, 16, 0, batch),
                batch_size=batch_size,
                num_workers=2,
                shuffle=True,
                pin_memory=True)
            start = time.time()
            running_loss_c = 0.
            running_loss_f = 0.

            iters = len(trn_loader)

            for i, (mels, coarse, fine, coarse_f,
                    fine_f) in enumerate(trn_loader):

                mels, coarse, fine, coarse_f, fine_f = mels.cuda(
                ), coarse.cuda(), fine.cuda(), coarse_f.cuda(), fine_f.cuda()
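                # Drop the padded edges of each sequence before computing the loss.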
                coarse, fine, coarse_f, fine_f = [
                    t[:, hop_length:1 - hop_length]
                    for t in [coarse, fine, coarse_f, fine_f]
                ]
                if use_half:
                    mels = mels.half()
                    coarse_f = coarse_f.half()
                    fine_f = fine_f.half()

                x = torch.cat([
                    coarse_f[:, :-1].unsqueeze(-1),
                    fine_f[:, :-1].unsqueeze(-1), coarse_f[:, 1:].unsqueeze(-1)
                ],
                              dim=2)

                p_c, p_f, _h_n = self(x, mels)
                loss_c = criterion(p_c.transpose(1, 2).float(), coarse[:, 1:])
                loss_f = criterion(p_f.transpose(1, 2).float(), fine[:, 1:])
                loss = loss_c + loss_f

                optimiser.zero_grad()
                if use_half:
                    optimiser.backward(loss)
                else:
                    loss.backward()
                optimiser.step()
                running_loss_c += loss_c.item()
                running_loss_f += loss_f.item()

                self.after_update()

                speed = (i + 1) / (time.time() - start)
                avg_loss_c = running_loss_c / (i + 1)
                avg_loss_f = running_loss_f / (i + 1)

                step += 1
                k = step // 1000
                logger.status(
                    f'Epoch: {e+1}/{epochs} -- Batch: {i+1}/{iters} -- Loss: c={avg_loss_c:#.4} f={avg_loss_f:#.4} -- Speed: {speed:#.4} steps/sec -- Step: {k}k '
                )

            os.makedirs(paths.checkpoint_dir, exist_ok=True)
            torch.save(self.state_dict(), paths.model_path())
            np.save(paths.step_path(), step)
            logger.log_current_status()
            logger.log(
                f' <saved>; w[0][0] = {self.wavernn.gru.weight_ih_l0[0][0]}')
            if k > saved_k + 50:
                torch.save(self.state_dict(), paths.model_hist_path(step))
                saved_k = k
                self.do_generate(paths,
                                 step,
                                 dataset.path,
                                 valid_index,
                                 use_half=use_half)
Example #59
0
    def _log(self, msg):
        logger.log(msg, ["client", str(self._port)])
Example #60
0
def hammer():
    data = get_last_5_min_data()
    volume_prev = data[0]['previous']['Volume']
    volume_cur = data[1]['latest']['Volume']
    cur_h = data[1]['latest']['High']
    cur_l = data[1]['latest']['Low']
    cur_o = data[1]['latest']['Open']
    cur_c = data[1]['latest']['Close']
    if cur_o > cur_c:
        candle_color = 'red'
        candle_range = cur_h - cur_l
        body_range = cur_o - cur_c
        top_wick = cur_h - cur_o
        bottom_wick = cur_c - cur_l
        dic = {
            'candle_color': candle_color,
            'candle_range': candle_range,
            'body_range': body_range,
            'top_wick': top_wick,
            'bottom_wick': bottom_wick,
            'volume_prev': volume_prev,
            'volume_cur': volume_cur
        }
        log(dic)
    elif cur_c > cur_o:
        candle_color = 'grn'
        candle_range = cur_h - cur_l
        body_range = cur_c - cur_o
        top_wick = cur_h - cur_c
        bottom_wick = cur_o - cur_l
        dic = {
            'candle_color': candle_color,
            'candle_range': candle_range,
            'body_range': body_range,
            'top_wick': top_wick,
            'bottom_wick': bottom_wick,
            'volume_prev': volume_prev,
            'volume_cur': volume_cur
        }
        log(dic)
    else:
        dic = {'candle': 'indecision'}

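    # Classify the candle: a long lower wick on rising volume suggests a bullish
    # hammer, while a long upper wick suggests a shooting star.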
    if 'bottom_wick' in dic:
        if dic['volume_prev'] < dic['volume_cur']:
            if dic['bottom_wick'] > dic['top_wick'] and dic[
                    'candle_range'] > dic['body_range'] * 3:
                log(f'bullish hammer {dic}')
            elif dic['bottom_wick'] < dic['top_wick'] and dic[
                    'candle_range'] > dic['body_range'] * 3:
                log(f'shooting star {dic}')
    else:
        log('Indecision')
    return dic