Example No. 1
    def run_daemon(self):
        redis_servers = settings.get_redis_servers()

        for redis_server in redis_servers:
            
            info = InfoThread(redis_server["server"], redis_server["port"],
                              redis_server.get("password", None))
            self.threads.append(info)
            info.setDaemon(True)
            info.start()
        # In this particular case, running a single MONITOR client can reduce 
        # the throughput by more than 50%. Running more MONITOR clients will 
        # reduce throughput even more.
        try:
            doitems = 0
            while self.active:
                time.sleep(1)
                doitems += 1
                stats_provider = RedisLiveDataProvider.get_provider()
                # Periodically compact the stats database
                # (similar in spirit to a Redis AOF rewrite).
                if doitems % 3600 == 0:
                    stats_provider.collection_database()
                
        except (KeyboardInterrupt, SystemExit):
            self.stop()
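The loop above assumes settings.get_redis_servers() returns a list of dicts keyed the way the loop reads them; a minimal sketch of that shape (hosts, ports and the password are placeholders):

    # Hypothetical return value of settings.get_redis_servers(); "password" is
    # optional and defaults to None via redis_server.get("password", None) above.
    [
        {"server": "127.0.0.1", "port": 6379},
        {"server": "10.0.0.5", "port": 6380, "password": "secret"},
    ]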
Example No. 2
    def run(self):
        """Runs the thread.
        """
        stats_provider = RedisLiveDataProvider.get_provider()
        pool = redis.ConnectionPool(host=self.server, port=self.port, db=0,
                                    password=self.password)
        monitor = Monitor(pool)
        commands = monitor.monitor()

        for command in commands:
            try:
                parts = command.split(" ")

                if len(parts) == 1:
                    continue

                epoch = float(parts[0].strip())
                timestamp = datetime.datetime.fromtimestamp(epoch)

                # Strip '(db N)' and '[N x.x.x.x:xx]' out of the monitor str
                if (parts[1] == "(db") or (parts[1][0] == "["):
                    parts = [parts[0]] + parts[3:]

                command = parts[1].replace('"', '').upper()

                if len(parts) > 2:
                    keyname = parts[2].replace('"', '').strip()
                else:
                    keyname = None

                if len(parts) > 3:
                    # TODO: This is probably more efficient as a list
                    # comprehension wrapped in " ".join()
                    arguments = ""
                    for x in xrange(3, len(parts)):
                        arguments += " " + parts[x].replace('"', '')
                    arguments = arguments.strip()
                else:
                    arguments = None

                if command not in ('INFO', 'MONITOR'):
                    stats_provider.save_monitor_command(self.id, 
                                                        timestamp, 
                                                        command, 
                                                        str(keyname), 
                                                        str(arguments))

            except Exception, e:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print command
                print "==============================\n"

            if self.stopped():
                break
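The TODO above suggests building `arguments` with a join instead of string concatenation; a minimal sketch of that alternative, assuming the same quote-stripping is wanted:

    # Hypothetical replacement for the xrange loop above; joining the cleaned
    # parts with single spaces produces the same string.
    if len(parts) > 3:
        arguments = " ".join(p.replace('"', '') for p in parts[3:])
    else:
        arguments = None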
Example No. 3
    def run(self):
        """Runs the thread.
        """
        stats_provider = RedisLiveDataProvider.get_provider()
        pool = redis.ConnectionPool(host=self.server,
                                    port=self.port,
                                    db=0,
                                    password=self.password)
        monitor = Monitor(pool)
        commands = monitor.monitor()

        for command in commands:
            try:
                parts = command.split(" ")

                if len(parts) == 1:
                    continue

                epoch = float(parts[0].strip())
                timestamp = datetime.datetime.fromtimestamp(epoch)

                # Strip '(db N)' and '[N x.x.x.x:xx]' out of the monitor str
                if (parts[1] == "(db") or (parts[1][0] == "["):
                    parts = [parts[0]] + parts[3:]

                command = parts[1].replace('"', '').upper()

                if len(parts) > 2:
                    keyname = parts[2].replace('"', '').strip()
                else:
                    keyname = None

                if len(parts) > 3:
                    # TODO: This is probably more efficient as a list
                    # comprehension wrapped in " ".join()
                    arguments = ""
                    for x in xrange(3, len(parts)):
                        arguments += " " + parts[x].replace('"', '')
                    arguments = arguments.strip()
                else:
                    arguments = None

                if command not in IGNORED_COMMANDS:
                    stats_provider.save_monitor_command(
                        self.id, timestamp, command, str(keyname),
                        str(arguments))

            except Exception, e:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print command
                print "==============================\n"

            if self.stopped():
                break
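This variant filters through an `IGNORED_COMMANDS` collection defined elsewhere in the module; a hypothetical definition that would reproduce the hard-coded checks from the other examples:

    # Assumed module-level constant; the real definition is not shown in this example.
    IGNORED_COMMANDS = frozenset(['INFO', 'MONITOR'])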
Example No. 4
    def run(self):

        statsProvider = RedisLiveDataProvider.GetProvider()
        pool = redis.ConnectionPool(host=self.server,
                                    port=self.port,
                                    db=0,
                                    password=self.password)
        monitor = Monitor(pool)
        commands = monitor.monitor()

        for command in commands:
            try:

                parts = command.split(" ")

                if len(parts) == 1:
                    continue

                t = parts[0].strip()
                epoch = float(t)
                timestamp = datetime.datetime.fromtimestamp(epoch)

                # Strip '(db N)' and '[N x.x.x.x:xx]' out of the monitor str
                if (parts[1] == "(db") or (parts[1][0] == "["):
                    parts = [parts[0]] + parts[3:]

                command = parts[1].replace('"', '').upper()

                if len(parts) > 2:
                    keyname = parts[2].replace('"', '').strip()
                else:
                    keyname = None

                if len(parts) > 3:
                    arguments = ""
                    for x in xrange(3, len(parts)):
                        arguments += " " + parts[x].replace('"', '')
                    arguments = arguments.strip()
                else:
                    arguments = None

                if command != 'INFO' and command != 'MONITOR':
                    statsProvider.SaveMonitorCommand(self.id, timestamp,
                                                     command, str(keyname),
                                                     str(arguments))

            except Exception, e:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print command
                print "==============================\n"

            if self.stopped():
                break
Example No. 5
class BaseController(tornado.web.RequestHandler):

    stats_provider = RedisLiveDataProvider.get_provider()

    def getStatsPerServer(self, server):
        try:
            connection = redis.StrictRedis(host=server[0],
                                           port=int(server[1]),
                                           socket_timeout=3)
            info = connection.info()
            # When the instance is down, the INFO call above can be slow
            # (hence the socket_timeout).
            info.update({
                "server_name": server,
                "status": info.get("role"),
                "last_save_humanized": info.get("last_save_time"),
                "total_keys": connection.dbsize()
            })
            # Replication (master/slave) status
            role = info["role"]
            slaves = ""

            if role == "master":
                connected_slaves = int(info["connected_slaves"])
                slaves = ""
                for i in range(0, connected_slaves):
                    slaves += str(info["slave" + str(i)])
            else:
                master_host = info["master_host"]
                master_port = str(info["master_port"])
                master_link_status = info["master_link_status"]
                master_sync_in_progress = info["master_sync_in_progress"]
                if master_host != "":
                    slaves = (master_host + ":" + master_port + "," +
                              master_link_status)
                    if master_sync_in_progress == 1:
                        slaves += ",syncing"
            info['master_slaves'] = slaves

        except Exception, ex:
            print ex

            info = {
                "role": "down",
                "uptime_in_seconds": 0,
                "total_commands_processed": 0,
                "used_memory_human": "?",
                "connected_clients": 0,
                "status": "down",
                "server_name": server,
            }

        return info
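A brief usage sketch for getStatsPerServer, assuming `server` is an indexable (host, port) pair as the indexing in the method implies; the address below is a placeholder:

    # Hypothetical call from another handler method; the real server list
    # comes from the application's settings.
    stats = self.getStatsPerServer(("127.0.0.1", "6379"))
    print stats["status"], stats.get("total_keys")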
Example No. 6
    def run(self):
        """Does all the work.
        """
        stats_provider = RedisLiveDataProvider.get_provider()
        redis_client = redis.StrictRedis(host=self.server,
                                         port=self.port,
                                         db=0,
                                         password=self.password)

        # process the results from redis
        while not self.stopped():
            try:
                redis_info = redis_client.info()
                current_time = datetime.datetime.now()
                used_memory = int(redis_info['used_memory'])

                # used_memory_peak not available in older versions of redis
                try:
                    peak_memory = int(redis_info['used_memory_peak'])
                except:
                    peak_memory = used_memory

                stats_provider.save_memory_info(self.id, current_time,
                                                used_memory, peak_memory)
                stats_provider.save_info_command(self.id, current_time,
                                                 redis_info)

                # databases=[]
                # for key in sorted(redis_info.keys()):
                #     if key.startswith("db"):
                #         database = redis_info[key]
                #         database['name']=key
                #         databases.append(database)

                # expires=0
                # persists=0
                # for database in databases:
                #     expires+=database.get("expires")
                #     persists+=database.get("keys")-database.get("expires")

                # stats_provider.SaveKeysInfo(self.id, current_time, expires, persists)

                time.sleep(1)

            except Exception, e:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print "==============================\n"
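The bare except around used_memory_peak is presumably guarding against the key being absent on older Redis servers; for that case, a dict lookup with a default is an equivalent sketch:

    # Equivalent fallback for a missing key: default the peak to used_memory.
    peak_memory = int(redis_info.get('used_memory_peak', used_memory))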
Example No. 7
    def run(self):
        """Does all the work.
        """
        stats_provider = RedisLiveDataProvider.get_provider()
        redis_client = redis.StrictRedis(host=self.server, port=self.port, db=0,
                                        password=self.password)

        # process the results from redis
        while not self.stopped():
            try:
                redis_info = redis_client.info()
                current_time = datetime.datetime.now()
                used_memory = int(redis_info['used_memory'])
                # print redis_info

                # used_memory_peak not available in older versions of redis
                try:
                    peak_memory = int(redis_info['used_memory_peak'])
                except:
                    peak_memory = used_memory

                stats_provider.save_memory_info(self.id, current_time, 
                                                used_memory, peak_memory)
                stats_provider.save_info_command(self.id, current_time, 
                                                 redis_info)

                # databases=[]
                # for key in sorted(redis_info.keys()):
                #     if key.startswith("db"):
                #         database = redis_info[key]
                #         database['name']=key
                #         databases.append(database)

                # expires=0
                # persists=0
                # for database in databases:
                #     expires+=database.get("expires")
                #     persists+=database.get("keys")-database.get("expires")

                # stats_provider.SaveKeysInfo(self.id, current_time, expires, persists)

                # time.sleep(1)

            except Exception, e:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print "==============================\n"
Example No. 8
    def run_daemon(self):
        try:
            doitems = 0
            while self.active:
                try:
                    self.run_info()
                except Exception:
                    print traceback.format_exc()

                time.sleep(10)
                doitems += 1

                # Periodically compact the stats database
                # (similar in spirit to a Redis BGREWRITEAOF).
                if doitems % 3600 == 0:
                    stats_provider = RedisLiveDataProvider.get_provider()
                    stats_provider.collection_database()

        except (KeyboardInterrupt, SystemExit):
            self.stop()
Example No. 9
    def run(self):
        """Does all the work.
        """

        stats_provider = RedisLiveDataProvider.get_provider()
 
        while not self.stopped():
            try:
                current_time = datetime.datetime.now()
                deadline_time = current_time - datetime.timedelta(days=self.days)
                stats_provider.delete_old_date(deadline_time)
                time.sleep(10)

            except Exception, e:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print "==============================\n"
Example No. 10
    def run(self):
        self.stats_provider = RedisLiveDataProvider.get_provider()
        redis_client = redis.StrictRedis(host=self.server,
                                         port=self.port,
                                         db=0,
                                         password=self.password,
                                         socket_timeout=3)

        doitems = 0
        while not self.stopped():
            time.sleep(self.monitor_tick)
            doitems += 1
            try:
                redis_info = {}
                current_time = datetime.datetime.now()

                try:
                    redis_info = redis_client.info()
                except Exception:
                    self.servicedown()
                    print traceback.format_exc()
                    continue

                # remove history
                if (doitems % 30 == 0):
                    delta = datetime.timedelta(seconds=self.reserved_min * 60)
                    start = current_time - delta
                    self.stats_provider.delete_history(self.id, start)

                self.LogInfo(redis_info, current_time, self.last)

                if (self.last2.gettick_sec(current_time) >= 60):
                    self.LogInfo(redis_info, current_time, self.last2)

                self.CheckMasterStatus(redis_info, current_time)

            except Exception:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print "==============================\n"
Example No. 11
    def run(self):
        self.stats_provider = RedisLiveDataProvider.get_provider()
        redis_client = redis.StrictRedis(host=self.server, port=self.port, db=0,
                                        password=self.password, socket_timeout=3)

        doitems = 0
        while not self.stopped():
            time.sleep(self.monitor_tick)
            doitems += 1
            try:
                redis_info = {}
                current_time = datetime.datetime.now()
               
                try:
                    redis_info = redis_client.info()
                except Exception:
                    self.servicedown()
                    print traceback.format_exc()
                    continue
                
                # remove history
                if doitems % 30 == 0:
                    delta = datetime.timedelta(seconds=self.reserved_min * 60)
                    start = current_time - delta
                    self.stats_provider.delete_history(self.id, start)
                
                
                self.LogInfo(redis_info, current_time, self.last)
                
                if self.last2.gettick_sec(current_time) >= 60:
                    self.LogInfo(redis_info, current_time, self.last2)
                
                self.CheckMasterStatus(redis_info, current_time)
                
            except Exception:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print "==============================\n"
Example No. 12
    def run(self):

        statsProvider = RedisLiveDataProvider.GetProvider()
        redisClient = redis.StrictRedis(host=self.server, port=self.port, db=0)

        while not self.stopped():
            try:
                redisInfo = redisClient.info()
                currentTime = datetime.datetime.now()
                used_memory = int(redisInfo['used_memory'])
                peak_memory = int(redisInfo['used_memory_peak'])
                statsProvider.SaveMemoryInfo(self.id, currentTime, used_memory,
                                             peak_memory)
                statsProvider.SaveInfoCommand(self.id, currentTime, redisInfo)

                # databases=[]
                # for key in sorted(redisInfo.keys()):
                # 	if key.startswith("db"):
                # 		database = redisInfo[key]
                # 		database['name']=key
                # 		databases.append(database)

                # expires=0
                # persists=0
                # for database in databases:
                # 	expires+=database.get("expires")
                # 	persists+=database.get("keys")-database.get("expires")

                # statsProvider.SaveKeysInfo(self.id, currentTime, expires, persists)

                time.sleep(1)

            except Exception, e:
                tb = traceback.format_exc()
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print "==============================\n"
Example No. 13
class BaseController(tornado.web.RequestHandler):

    stats_provider = RedisLiveDataProvider.get_provider()

    def getStatsPerServer(self, server):
        try:
            connection = redis.Redis(host=server[0],
                                     port=int(server[1]),
                                     db=0,
                                     socket_timeout=0.1)
            info = connection.info()
            # When the instance is down, the INFO call above can be slow
            # (hence the socket_timeout).
            info.update({
                "server_name": server,
                "status": info.get("role"),
                "last_save_humanized": info.get("last_save_time")
            })

            # Replication (master/slave) status
            role = info["role"]
            slaves = ""

            if role == "master":
                connected_slaves = int(info["connected_slaves"])
                slaves = ""
                for i in range(0, connected_slaves):
                    slaves += str(info["slave" + str(i)])
            else:
                master_host = info["master_host"]
                master_port = str(info["master_port"])
                master_link_status = info["master_link_status"]
                master_sync_in_progress = info["master_sync_in_progress"]
                if master_host != "":
                    slaves = (master_host + ":" + master_port + "," +
                              master_link_status)
                    if master_sync_in_progress == 1:
                        slaves += ",syncing"
            info['master_slaves'] = slaves

        except redis.exceptions.ConnectionError:
            info = {
                "role": "down",
                "uptime_in_seconds": 0,
                "total_commands_processed": 0,
                "used_memory_human": "?",
                "connected_clients": 0,
                "status": "down",
                "server_name": server,
            }

        return info

    def datetime_to_list(self, datetime):
        """Converts a datetime to a list.

        Args:
            datetime (datetime): The datetime to convert.
        """
        parsed_date = dateutil.parser.parse(datetime)
        # don't return the last two fields, we don't want them.
        return tuple(parsed_date.timetuple())[:-2]

    # todo : fix this
    def average_data(self, data):
        """Averages data.

        TODO: More docstring here, once functionality is understood.
        """
        average = []

        deviation = 1024 * 1024

        start = dateutil.parser.parse(data[0][0])
        end = dateutil.parser.parse(data[-1][0])
        difference = end - start
        weeks, days = divmod(difference.days, 7)
        minutes, seconds = divmod(difference.seconds, 60)
        hours, minutes = divmod(minutes, 60)

        # TODO: These if/elif/else branches should probably be broken out into
        # individual functions to make it easier to follow what's going on.
        if difference.days > 0:
            current_max = 0
            current_current = 0
            current_d = 0

            for dt, max_memory, current_memory in data:
                d = dateutil.parser.parse(dt)
                if d.day != current_d:
                    current_d = d.day
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                else:
                    if max_memory > current_max or \
                       current_memory > current_current:
                        average.pop()
                        average.append([dt, max_memory, current_memory])
                        current_max = max_memory
                        current_current = current_memory
        elif hours > 0:
            current_max = 0
            current_current = 0
            current = -1
            keep_flag = False

            for dt, max_memory, current_memory in data:
                d = dateutil.parser.parse(dt)
                if d.hour != current:
                    current = d.hour
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False
                elif abs(max_memory - current_max) > deviation or \
                     abs(current_memory - current_current) > deviation:
                    # average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = True
                elif max_memory > current_max or \
                     current_memory > current_current:
                    if keep_flag != True:
                        average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False
        else:
            current_max = 0
            current_current = 0
            current_m = -1
            keep_flag = False
            for dt, max_memory, current_memory in data:
                d = dateutil.parser.parse(dt)
                if d.minute != current_m:
                    current_m = d.minute
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False
                elif abs(max_memory - current_max) > deviation or \
                     abs(current_memory - current_current) > deviation:
                    # average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = True
                elif max_memory > current_max or \
                    current_memory > current_current:
                    if keep_flag != True:
                        average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False

        return average
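A short usage sketch for average_data, assuming each row is [timestamp_string, max_memory, current_memory] as the unpacking in the loops implies; the rows below are made up:

    # Hypothetical input: (timestamp, peak memory, used memory) samples in bytes,
    # as passed from another handler method.
    rows = [
        ["2013-01-01 10:00:00", 120 * 1024 * 1024, 100 * 1024 * 1024],
        ["2013-01-01 10:00:30", 121 * 1024 * 1024, 101 * 1024 * 1024],
        ["2013-01-01 10:05:00", 150 * 1024 * 1024, 140 * 1024 * 1024],
    ]
    # The span is under an hour, so the method condenses to roughly one
    # representative point per minute.
    condensed = self.average_data(rows)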
Example No. 14
    def run(self):
        """Does all the work.
        """
        monitor_tick = 2         # seconds between samples
        reserved_min = 1440 * 7  # minutes of history to keep (1440 min = 1 day)
        
        
        stats_provider = RedisLiveDataProvider.get_provider()
        redis_client = redis.StrictRedis(host=self.server, port=self.port, db=0,
                                        password=self.password)

        last_expired_keys = 0
        last_evicted_keys = 0
        last_keyspace_hits = 0
        last_keyspace_misses = 0
        last_total_commands_processed = 0
        last_role_status = ""
        last_role = {}

        doitems = 0
        # process the results from redis
        while not self.stopped():
            time.sleep(monitor_tick)
            doitems += 1
            try:
                redis_info = {}
                current_time = datetime.datetime.now()
                redis_info = redis_client.info()
                
                # Not saved here: persisting the full INFO output made the
                # Redis AOF grow too large.
#                 try:
#                     redis_info = redis_client.info()
#                 except Exception, e:
#                     redis_info['role']='down'
#                     redis_info['uptime_in_seconds']=0
#                     redis_info['total_commands_processed']=0
#                     redis_info['used_memory_human']=''
#                     redis_info['connected_clients']=''
#                     
#                     stats_provider.save_info_command(self.id, current_time,
#                                                  redis_info)
#                     print "==============================\n"
#                     print datetime.datetime.now()
#                     print traceback.format_exc()
#                     print "==============================\n"
#                     continue
#                             
#                 info all
#                 stats_provider.save_info_command(self.id, current_time,
#                                                  redis_info)
               
                # Trim history older than the retention window
                if doitems % 30 == 0:
                    delta = datetime.timedelta(seconds=reserved_min * 60)
                    start = current_time - delta
                    stats_provider.delete_history(self.id, start)
                
                  
                # Memory usage
                used_memory = int(redis_info['used_memory'])

                # used_memory_peak not available in older versions of redis
                try:
                    peak_memory = int(redis_info['used_memory_peak'])
                except:
                    peak_memory = used_memory
                
#                 stats_provider.save_memory_info(self.id, current_time,
#                                                 used_memory, peak_memory)
#                
                # Keyspace info
                databases = []
                for key in sorted(redis_info.keys()):
                    if key.startswith("db"):
                        database = redis_info[key]
                        database['name'] = key
                        databases.append(database)

                expires = 0
                persists = 0
                for database in databases:
                    expires += database.get("expires")
                    persists += database.get("keys") - database.get("expires")
                
               
                expired_keys = redis_info["expired_keys"]
                evicted_keys = redis_info["evicted_keys"]
                keyspace_hits = redis_info["keyspace_hits"]
                keyspace_misses = redis_info["keyspace_misses"]
                total_commands_processed = redis_info["total_commands_processed"]
                
                # Rates derived from counter deltas since the previous sample
                expired = 0
                evicted = 0
                if last_expired_keys > 0 or last_evicted_keys > 0:
                    expired = expired_keys - last_expired_keys
                    if expired >= 0:
                        expired = int(expired / monitor_tick)
                        last_expired_keys = expired_keys
                    else:
                        expired = 0
                        last_expired_keys = 0

                    evicted = evicted_keys - last_evicted_keys
                    if evicted >= 0:
                        evicted = int(evicted / monitor_tick)
                        last_evicted_keys = evicted_keys
                    else:
                        evicted = 0
                        last_evicted_keys = 0
                else:
                    last_expired_keys = expired_keys
                    last_evicted_keys = evicted_keys

                hit_rate = 0
                if last_keyspace_hits > 0 or last_keyspace_misses > 0:
                    hits = keyspace_hits - last_keyspace_hits
                    miss = keyspace_misses - last_keyspace_misses
                    if hits >= 0 and miss >= 0:
                        total = hits + miss
                        if total > 0:
                            hit_rate = int((hits * 100) / (hits + miss))
                            last_keyspace_hits = keyspace_hits
                            last_keyspace_misses = keyspace_misses
                    else:
                        last_keyspace_hits = 0
                        last_keyspace_misses = 0
                else:
                    last_keyspace_hits = keyspace_hits
                    last_keyspace_misses = keyspace_misses

                commands = 0
                if last_total_commands_processed > 0:
                    commands = total_commands_processed - last_total_commands_processed
                    if commands >= 0:
                        commands = int(commands / monitor_tick)
                        last_total_commands_processed = total_commands_processed
                    else:
                        last_total_commands_processed = 0
                        commands = 0
                else:
                    last_total_commands_processed = total_commands_processed

                stats_provider.save_keys_Info(self.id, current_time, expires, persists,
                                              expired, evicted, hit_rate, commands,
                                              used_memory, peak_memory)
                
                # Replication (master/slave) status
                role = redis_info["role"]
                role_status = {}

                if role == "master":
                    connected_slaves = int(redis_info["connected_slaves"])
                    slaves = ""
                    for i in range(0, connected_slaves):
                        slaves += redis_info["slave" + str(i)]

                    role_status = {"role": role, "slaves": slaves}
                else:
                    master_host = redis_info["master_host"]
                    master_port = str(redis_info["master_port"])
                    master_link_status = redis_info["master_link_status"]
                    master_sync_in_progress = redis_info["master_sync_in_progress"]
                    role_status = {
                        "role": role,
                        "master_host_port": master_host + ":" + master_port,
                        "master_link_status": master_link_status,
                        "master_sync_in_progress": master_sync_in_progress,
                    }

                role_cur = json.dumps(role_status)
                if role_cur != last_role_status:
                    # Skip saving on the monitor's very first sample.
                    if last_role_status != "":
                        stats_provider.save_status_info(self.id, current_time, role_status)
                        self.sendslavesms(role_status, last_role)

                    last_role_status = role_cur
                    last_role = role_status

            except Exception, e:
                last_expired_keys = 0
                last_evicted_keys = 0
                last_keyspace_hits = 0
                last_keyspace_misses = 0
                last_total_commands_processed = 0
                
                tb = traceback.format_exc()
                
                print "==============================\n"
                print datetime.datetime.now()
                print tb
                print "==============================\n"
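The counter-differencing in this example repeats the same pattern for several metrics; a condensed sketch of that technique as a hypothetical helper (the name and signature are made up, not part of the original code):

    # Hypothetical helper: returns (per_second_rate, new_baseline) given the
    # current counter value, the previous baseline, and the sample interval.
    def counter_rate(current, last, tick):
        if last <= 0:
            return 0, current          # first sample: just establish the baseline
        delta = current - last
        if delta < 0:                  # counter went backwards (e.g. Redis restarted)
            return 0, 0
        return int(delta / tick), current

    # e.g. expired, last_expired_keys = counter_rate(expired_keys,
    #                                                last_expired_keys, monitor_tick)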
Example No. 15
class BaseController(tornado.web.RequestHandler):

    stats_provider = RedisLiveDataProvider.get_provider()

    def datetime_to_list(self, datetime):
        """Converts a datetime to a list.

        Args:
            datetime (datetime): The datetime to convert.
        """
        parsed_date = dateutil.parser.parse(datetime)
        # don't return the last two fields, we don't want them.
        return tuple(parsed_date.timetuple())[:-2]

    # todo : fix this
    def average_data(self, data):
        """Averages data.

        TODO: More docstring here, once functionality is understood.
        """
        average = []

        deviation = 1024 * 1024

        start = dateutil.parser.parse(data[0][0])
        end = dateutil.parser.parse(data[-1][0])
        difference = end - start
        weeks, days = divmod(difference.days, 7)
        minutes, seconds = divmod(difference.seconds, 60)
        hours, minutes = divmod(minutes, 60)

        # TODO: These if/elif/else branches should probably be broken out into
        # individual functions to make it easier to follow what's going on.
        if difference.days > 0:
            current_max = 0
            current_current = 0
            current_d = 0

            for dt, max_memory, current_memory in data:
                d = dateutil.parser.parse(dt)
                if d.day != current_d:
                    current_d = d.day
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                else:
                    if max_memory > current_max or \
                       current_memory > current_current:
                        average.pop()
                        average.append([dt, max_memory, current_memory])
                        current_max = max_memory
                        current_current = current_memory
        elif hours > 0:
            current_max = 0
            current_current = 0
            current = -1
            keep_flag = False

            for dt, max_memory, current_memory in data:
                d = dateutil.parser.parse(dt)
                if d.hour != current:
                    current = d.hour
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False
                elif abs(max_memory - current_max) > deviation or \
                        abs(current_memory - current_current) > deviation:
                    # average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = True
                elif max_memory > current_max or \
                        current_memory > current_current:
                    if keep_flag != True:
                        average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False
        else:
            current_max = 0
            current_current = 0
            current_m = -1
            keep_flag = False
            for dt, max_memory, current_memory in data:
                d = dateutil.parser.parse(dt)
                if d.minute != current_m:
                    current_m = d.minute
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False
                elif abs(max_memory - current_max) > deviation or \
                        abs(current_memory - current_current) > deviation:
                    # average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = True
                elif max_memory > current_max or \
                        current_memory > current_current:
                    if keep_flag != True:
                        average.pop()
                    average.append([dt, max_memory, current_memory])
                    current_max = max_memory
                    current_current = current_memory
                    keep_flag = False

        return average
Example No. 16
class BaseController(tornado.web.RequestHandler):

    statsProvider = RedisLiveDataProvider.GetProvider()

    def DateTimeToList(self, datetime):
        parsedDate = dateutil.parser.parse(datetime)
        return [
            parsedDate.year, parsedDate.month, parsedDate.day, parsedDate.hour,
            parsedDate.minute, parsedDate.second
        ]

    # todo : fix this
    def AverageData(self, data):
        average = []

        deviation = 1024 * 1024

        start = dateutil.parser.parse(data[0][0])
        end = dateutil.parser.parse(data[-1][0])
        difference = end - start
        weeks, days = divmod(difference.days, 7)
        minutes, seconds = divmod(difference.seconds, 60)
        hours, minutes = divmod(minutes, 60)

        if difference.days > 0:
            current_max = 0
            current_current = 0
            current_d = 0
            for dt, maxMemory, currentMemory in data:
                d = dateutil.parser.parse(dt)
                if d.day != current_d:
                    current_d = d.day
                    average.append([dt, maxMemory, currentMemory])
                    current_max = maxMemory
                    current_current = currentMemory
                else:
                    if maxMemory > current_max or currentMemory > current_current:
                        average.pop()
                        average.append([dt, maxMemory, currentMemory])
                        current_max = maxMemory
                        current_current = currentMemory
        elif hours > 0:
            current_max = 0
            current_current = 0
            current = -1
            keep_flag = False
            for dt, maxMemory, currentMemory in data:
                d = dateutil.parser.parse(dt)
                if d.hour != current:
                    current = d.hour
                    average.append([dt, maxMemory, currentMemory])
                    current_max = maxMemory
                    current_current = currentMemory
                    keep_flag = False
                elif abs(maxMemory - current_max) > deviation or abs(
                        currentMemory - current_current) > deviation:
                    #average.pop()
                    average.append([dt, maxMemory, currentMemory])
                    current_max = maxMemory
                    current_current = currentMemory
                    keep_flag = True
                elif maxMemory > current_max or currentMemory > current_current:
                    if keep_flag != True:
                        average.pop()
                    average.append([dt, maxMemory, currentMemory])
                    current_max = maxMemory
                    current_current = currentMemory
                    keep_flag = False
        else:
            current_max = 0
            current_current = 0
            current_m = -1
            keep_flag = False
            for dt, maxMemory, currentMemory in data:
                d = dateutil.parser.parse(dt)
                if d.minute != current_m:
                    current_m = d.minute
                    average.append([dt, maxMemory, currentMemory])
                    current_max = maxMemory
                    current_current = currentMemory
                    keep_flag = False
                elif abs(maxMemory - current_max) > deviation or abs(
                        currentMemory - current_current) > deviation:
                    #average.pop()
                    average.append([dt, maxMemory, currentMemory])
                    current_max = maxMemory
                    current_current = currentMemory
                    keep_flag = True
                elif maxMemory > current_max or currentMemory > current_current:
                    if keep_flag != True:
                        average.pop()
                    average.append([dt, maxMemory, currentMemory])
                    current_max = maxMemory
                    current_current = currentMemory
                    keep_flag = False

        return average