Example #1
                                    sep=";")

        monitoring_df.columns = [
            'server_name', 'Memory_used', 'Memory_free', 'Memory_buff',
            'Memory_cached', 'Net_recv', 'Net_send', 'Disk_read', 'Disk_write',
            'System_la1', 'CPU_user', 'CPU_system', 'CPU_iowait'
        ]
        monitoring_df.index = pd.to_datetime(
            dateconv((monitoring_df.index.values)))
        monitoring_df.index.names = ['timestamp']

        unique_servers = monitoring_df['server_name'].unique()
        for server_ in unique_servers:
            if not Server.objects.filter(server_name=server_).exists():
                print "Adding new server: " + server_
                s = Server(server_name=server_)
                s.save()
            # Fetch the server unconditionally so s is bound even when it
            # already existed and the branch above was skipped.
            s = Server.objects.get(server_name=server_)
            server_id = s.id
            if not ServerMonitoringData.objects.filter(
                    server_id=server_id, test_id=test_id).exists():
                df_server = monitoring_df[(
                    monitoring_df.server_name == server_)]
                output_json = json.loads(df_server.to_json(orient='index',
                                                           date_format='iso'),
                                         object_pairs_hook=OrderedDict)
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'Memory_used': output_json[row]['Memory_used'],
                        'Memory_free': output_json[row]['Memory_free'],
                        'Memory_buff': output_json[row]['Memory_buff'],
                        'Memory_cached': output_json[row]['Memory_cached'],
                        'Net_recv': output_json[row]['Net_recv'],
                        'Net_send': output_json[row]['Net_send'],
                        'Disk_read': output_json[row]['Disk_read'],
                        'Disk_write': output_json[row]['Disk_write'],
                        'System_la1': output_json[row]['System_la1'],
                        'CPU_user': output_json[row]['CPU_user'],
                        'CPU_system': output_json[row]['CPU_system'],
                        'CPU_iowait': output_json[row]['CPU_iowait']
                    }
                    server_monitoring_data = ServerMonitoringData(
                        test_id=test_id, server_id=server_id, data=data)
                    server_monitoring_data.save()
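
These snippets rely on a `dateconv` helper that is never defined here. A minimal sketch consistent with its usage (vectorized conversion of epoch-second values to datetimes); the implementation is an assumption, not the project's confirmed code:

    # Assumed helper: vectorized epoch-seconds -> datetime conversion.
    import datetime
    import numpy as np

    dateconv = np.vectorize(datetime.datetime.fromtimestamp)
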
Example #2
def generate_data(t_id):
    print "Parse and generate test data: " + str(t_id)
    test_running = TestRunning.objects.get(id=t_id)
    if not Test.objects.filter(path=test_running.workspace).exists():
        test = Test(
            project_id=test_running.project_id,
            path=test_running.workspace,
            display_name=test_running.display_name,
            start_time=test_running.start_time,
            end_time=test_running.end_time,
            build_number=0,
            show=True)
        test.save()
    else:
        test = Test.objects.get(path=test_running.workspace)
    project_id = test.project_id
    test_id = test.id
    jmeter_results_file = test_running.result_file_dest
    if os.path.exists(jmeter_results_file):
        df = pd.DataFrame()
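        # Very large files (over ~1 GB) are read in 3M-row chunks to limit memory use.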
        if os.stat(jmeter_results_file).st_size > 1000007777:
            print "Executing a parse for a huge file"
            chunks = pd.read_table(
                jmeter_results_file, sep=',', index_col=0, chunksize=3000000)
            for chunk in chunks:
                chunk.columns = [
                    'response_time', 'url', 'responseCode', 'success', 'threadName',
                    'failureMessage', 'grpThreads', 'allThreads'
                ]
                chunk = chunk[~chunk['url'].str.contains('exclude_', na=False)]
                df = df.append(chunk)
                print "Parsing a huge file,size: " + str(df.size)
        else:
            df = pd.read_csv(
                jmeter_results_file, index_col=0, low_memory=False)
            df.columns = [
                'response_time', 'url', 'responseCode', 'success', 'threadName',
                'failureMessage', 'grpThreads', 'allThreads'
            ]
            df = df[~df['url'].str.contains('exclude_', na=False)]

        df.columns = [
            'response_time', 'url', 'responseCode', 'success', 'threadName',
            'failureMessage', 'grpThreads', 'allThreads'
        ]

        # Convert epoch-millisecond timestamps to a datetime index.
        df.index = pd.to_datetime(dateconv((df.index.values / 1000)))
        num_lines = df['response_time'].count()
        print "Number of lines in filrue: %d." % num_lines
        unique_urls = df['url'].unique()
        for url in unique_urls:
            url = str(url)
            if not Action.objects.filter(
                    url=url, project_id=project_id).exists():
                print "Adding new action: " + str(url) + " project_id: " + str(
                    project_id)
                a = Action(url=url, project_id=project_id)
                a.save()
            a = Action.objects.get(url=url, project_id=project_id)
            action_id = a.id
            if not TestActionData.objects.filter(
                    action_id=action_id, test_id=test_id).exists():
                print "Adding action data: " + url
                df_url = df[(df.url == url)]
                url_data = pd.DataFrame()
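                # Aggregate this URL's samples into 1-minute buckets.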
                df_url_gr_by_ts = df_url.groupby(pd.TimeGrouper(freq='1Min'))
                url_data['avg'] = df_url_gr_by_ts.response_time.mean()
                url_data['median'] = df_url_gr_by_ts.response_time.median()
                url_data['count'] = df_url_gr_by_ts.success.count()
                df_url_gr_by_ts_only_errors = df_url[(
                    df_url.success == False
                )].groupby(pd.TimeGrouper(freq='1Min'))
                url_data[
                    'errors'] = df_url_gr_by_ts_only_errors.success.count()
                url_data['test_id'] = test_id
                url_data['url'] = url
                output_json = json.loads(
                    url_data.to_json(orient='index', date_format='iso'),
                    object_pairs_hook=OrderedDict)
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'avg': output_json[row]['avg'],
                        'median': output_json[row]['median'],
                        'count': output_json[row]['count'],
                        'url': output_json[row]['url'],
                        'errors': output_json[row]['errors'],
                        'test_id': output_json[row]['test_id'],
                    }
                    test_action_data = TestActionData(
                        test_id=output_json[row]['test_id'],
                        action_id=action_id,
                        data=data)
                    test_action_data.save()
        
        zip_results_file(jmeter_results_file)
        
        test_overall_data = pd.DataFrame()
        df_gr_by_ts = df.groupby(pd.TimeGrouper(freq='1Min'))
        test_overall_data['avg'] = df_gr_by_ts.response_time.mean()
        test_overall_data['median'] = df_gr_by_ts.response_time.median()
        test_overall_data['count'] = df_gr_by_ts.response_time.count()
        test_overall_data['test_id'] = test_id
        output_json = json.loads(
            test_overall_data.to_json(orient='index', date_format='iso'),
            object_pairs_hook=OrderedDict)
        for row in output_json:
            data = {
                'timestamp': row,
                'avg': output_json[row]['avg'],
                'median': output_json[row]['median'],
                'count': output_json[row]['count']
            }
            test_data = TestData(
                test_id=output_json[row]['test_id'], data=data)
            test_data.save()
    else:
        print "Result file does not exist"

    monitoring_results_file = test_running.monitoring_file_dest
    if os.path.exists(monitoring_results_file):
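        # Rewrite the monitoring file without lines containing 'start' before parsing.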
        f = open(monitoring_results_file, "r")
        lines = f.readlines()
        f.close()
        f = open(monitoring_results_file, "w")
        for line in lines:
            if not ('start' in line):
                f.write(line)

        f.close()
        monitoring_df = pd.read_csv(monitoring_results_file, index_col=1, sep=";")

        monitoring_df.columns = [
            'server_name', 'Memory_used', 'Memory_free', 'Memory_buff',
            'Memory_cached', 'Net_recv', 'Net_send', 'Disk_read', 'Disk_write',
            'System_la1', 'CPU_user', 'CPU_system', 'CPU_iowait'
        ]
        monitoring_df.index = pd.to_datetime(
            dateconv((monitoring_df.index.values)))
        monitoring_df.index.names = ['timestamp']

        unique_servers = monitoring_df['server_name'].unique()
        for server_ in unique_servers:
            if not Server.objects.filter(
                    server_name=server_).exists():
                print "Adding new server: " + server_
                s = Server(
                    server_name=server_
                )
                s.save()
            # Fetch unconditionally so s is bound even when the server already existed.
            s = Server.objects.get(server_name=server_)
            server_id = s.id
            if not ServerMonitoringData.objects.filter(
                    server_id=server_id,
                    test_id=test_id).exists():
                df_server = monitoring_df[(
                    monitoring_df.server_name == server_)]
                output_json = json.loads(
                    df_server.to_json(orient='index', date_format='iso'),
                    object_pairs_hook=OrderedDict)
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'Memory_used': output_json[row]['Memory_used'],
                        'Memory_free': output_json[row]['Memory_free'],
                        'Memory_buff': output_json[row]['Memory_buff'],
                        'Memory_cached': output_json[row]['Memory_cached'],
                        'Net_recv': output_json[row]['Net_recv'],
                        'Net_send': output_json[row]['Net_send'],
                        'Disk_read': output_json[row]['Disk_read'],
                        'Disk_write': output_json[row]['Disk_write'],
                        'System_la1': output_json[row]['System_la1'],
                        'CPU_user': output_json[row]['CPU_user'],
                        'CPU_system': output_json[row]['CPU_system'],
                        'CPU_iowait': output_json[row]['CPU_iowait']
                    }
                    server_monitoring_data = ServerMonitoringData(
                        test_id=test_id, server_id=server_id, data=data
                    )
                    server_monitoring_data.save()
    else:
        print "Result file does not exist"

    return True
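
`zip_results_file` is called after parsing but never defined in these snippets. A plausible sketch, assuming it compresses the raw JMeter CSV and removes the original (the behavior is an assumption based on the call site):

    import os
    import zipfile

    def zip_results_file(path):
        # Compress the results file next to itself, then drop the original.
        with zipfile.ZipFile(path + '.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
            zf.write(path, os.path.basename(path))
        os.remove(path)
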
Example #3
def generate_test_results_data(test_id,
                               project_id,
                               jmeter_results_file_path='',
                               jmeter_results_file_fields=None,
                               monitoring_results_file_path='',
                               monitoring_results_file_fields=None,
                               data_resolution='1Min'):
    data_resolution_id = TestDataResolution.objects.get(
        frequency=data_resolution).id
    if not jmeter_results_file_fields:
        jmeter_results_file_fields = [
            'response_time', 'url', 'responseCode', 'success', 'threadName',
            'failureMessage', 'grpThreads', 'allThreads'
        ]
    if not monitoring_results_file_fields:
        monitoring_results_file_fields = [
            'server_name', 'Memory_used', 'Memory_free', 'Memory_buff',
            'Memory_cached', 'Net_recv', 'Net_send', 'Disk_read', 'Disk_write',
            'System_la1', 'CPU_user', 'CPU_system', 'CPU_iowait'
        ]
    jmeter_results_file = jmeter_results_file_path
    if os.path.exists(jmeter_results_file):
        df = pd.DataFrame()
        if os.stat(jmeter_results_file).st_size > 1000007777:
            logger.debug("Executing a parse for a huge file")
            chunks = pd.read_table(jmeter_results_file,
                                   sep=',',
                                   index_col=0,
                                   chunksize=3000000)

            for chunk in chunks:
                chunk.columns = jmeter_results_file_fields
                chunk = chunk[~chunk['url'].str.contains('exclude_', na=False)]
                df = df.append(chunk)
        else:
            df = pd.read_csv(jmeter_results_file,
                             index_col=0,
                             low_memory=False)
            df.columns = jmeter_results_file_fields
            df = df[~df['url'].str.contains('exclude_', na=False)]

        df.columns = jmeter_results_file_fields

        df.index = pd.to_datetime(dateconv((df.index.values / 1000)))
        num_lines = df['response_time'].count()
        logger.debug('Number of lines in file: {}'.format(num_lines))
        unique_urls = df['url'].unique()
        for url in unique_urls:
            url = str(url)
            if not Action.objects.filter(url=url,
                                         project_id=project_id).exists():
                logger.debug("Adding new action: " + str(url) +
                             " project_id: " + str(project_id))
                a = Action(url=url, project_id=project_id)
                a.save()
            a = Action.objects.get(url=url, project_id=project_id)
            action_id = a.id
            if not TestActionData.objects.filter(
                    action_id=action_id,
                    test_id=test_id,
                    data_resolution_id=data_resolution_id).exists():
                logger.debug("Adding action data: {}".format(url))
                df_url = df[(df.url == url)]
                url_data = pd.DataFrame()
                df_url_gr_by_ts = df_url.groupby(
                    pd.TimeGrouper(freq=data_resolution))
                url_data['avg'] = df_url_gr_by_ts.response_time.mean()
                url_data['median'] = df_url_gr_by_ts.response_time.median()
                url_data['count'] = df_url_gr_by_ts.success.count()
                df_url_gr_by_ts_only_errors = df_url[(
                    df_url.success == False)].groupby(
                        pd.TimeGrouper(freq=data_resolution))
                url_data['errors'] = df_url_gr_by_ts_only_errors.success.count(
                )
                url_data['test_id'] = test_id
                url_data['url'] = url
                output_json = json.loads(url_data.to_json(orient='index',
                                                          date_format='iso'),
                                         object_pairs_hook=OrderedDict)
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'avg': output_json[row]['avg'],
                        'median': output_json[row]['median'],
                        'count': output_json[row]['count'],
                        'url': output_json[row]['url'],
                        'errors': output_json[row]['errors'],
                        'test_id': output_json[row]['test_id'],
                    }
                    test_action_data = TestActionData(
                        test_id=output_json[row]['test_id'],
                        action_id=action_id,
                        data_resolution_id=data_resolution_id,
                        data=data)
                    test_action_data.save()
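                # Store one aggregate row per action: describe() stats,
                # 90th/99th percentiles, weight, and error count.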
                if not TestActionAggregateData.objects.filter(
                        action_id=action_id, test_id=test_id).exists():
                    url_agg_data = dict(
                        json.loads(
                            df_url['response_time'].describe().to_json()))
                    url_agg_data['99%'] = df_url['response_time'].quantile(.99)
                    url_agg_data['90%'] = df_url['response_time'].quantile(.90)
                    url_agg_data['weight'] = float(
                        df_url['response_time'].sum())
                    url_agg_data['errors'] = df_url[(
                        df_url['success'] == False)]['success'].count()
                    test_action_aggregate_data = TestActionAggregateData(
                        test_id=test_id,
                        action_id=action_id,
                        data=url_agg_data)
                    test_action_aggregate_data.save()

        zip_results_file(jmeter_results_file)
        if not TestData.objects.filter(
                test_id=test_id,
                data_resolution_id=data_resolution_id).exists():
            test_overall_data = pd.DataFrame()
            df_gr_by_ts = df.groupby(pd.TimeGrouper(freq=data_resolution))
            test_overall_data['avg'] = df_gr_by_ts.response_time.mean()
            test_overall_data['median'] = df_gr_by_ts.response_time.median()
            test_overall_data['count'] = df_gr_by_ts.response_time.count()
            test_overall_data['test_id'] = test_id
            output_json = json.loads(test_overall_data.to_json(
                orient='index', date_format='iso'),
                                     object_pairs_hook=OrderedDict)
            for row in output_json:
                data = {
                    'timestamp': row,
                    'avg': output_json[row]['avg'],
                    'median': output_json[row]['median'],
                    'count': output_json[row]['count']
                }
                test_data = TestData(test_id=output_json[row]['test_id'],
                                     data_resolution_id=data_resolution_id,
                                     data=data)
                test_data.save()
    monitoring_results_file = monitoring_results_file_path
    if os.path.exists(monitoring_results_file):
        f = open(monitoring_results_file, "r")
        lines = f.readlines()
        f.close()
        f = open(monitoring_results_file, "w")
        for line in lines:
            if not ('start' in line):
                f.write(line)

        f.close()
        monitoring_df = pd.read_csv(monitoring_results_file,
                                    index_col=1,
                                    sep=";")
        monitoring_df.columns = monitoring_results_file_fields
        monitoring_df.index = pd.to_datetime(
            dateconv((monitoring_df.index.values)))
        monitoring_df.index.names = ['timestamp']

        unique_servers = monitoring_df['server_name'].unique()
        for server_ in unique_servers:
            if not Server.objects.filter(server_name=server_).exists():
                s = Server(server_name=server_)
                s.save()
            # Fetch unconditionally so s is bound even when the server already existed.
            s = Server.objects.get(server_name=server_)
            server_id = s.id
            if not ServerMonitoringData.objects.filter(
                    server_id=server_id,
                    test_id=test_id,
                    data_resolution_id=data_resolution_id).exists():
                df_server = monitoring_df[(
                    monitoring_df.server_name == server_)]
                output_json = json.loads(df_server.to_json(orient='index',
                                                           date_format='iso'),
                                         object_pairs_hook=OrderedDict)
                for row in output_json:
                    data = {
                        'timestamp': row,
                        'Memory_used': output_json[row]['Memory_used'],
                        'Memory_free': output_json[row]['Memory_free'],
                        'Memory_buff': output_json[row]['Memory_buff'],
                        'Memory_cached': output_json[row]['Memory_cached'],
                        'Net_recv': output_json[row]['Net_recv'],
                        'Net_send': output_json[row]['Net_send'],
                        'Disk_read': output_json[row]['Disk_read'],
                        'Disk_write': output_json[row]['Disk_write'],
                        'System_la1': output_json[row]['System_la1'],
                        'CPU_user': output_json[row]['CPU_user'],
                        'CPU_system': output_json[row]['CPU_system'],
                        'CPU_iowait': output_json[row]['CPU_iowait']
                    }
                    server_monitoring_data = ServerMonitoringData(
                        test_id=test_id,
                        data_resolution_id=data_resolution_id,
                        server_id=server_id,
                        data=data)
                    server_monitoring_data.save()
    else:
        logger.info("Monitoring file does not exist")
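
# A minimal invocation sketch of the function above; the paths and IDs are
# placeholders (assumptions), not values taken from the project:
#
#   generate_test_results_data(
#       test_id=42,
#       project_id=1,
#       jmeter_results_file_path='/tmp/results.csv',
#       monitoring_results_file_path='/tmp/monitoring.csv',
#       data_resolution='1Min')

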
def update_test_graphite_data(test_id):
    if Configuration.objects.filter(name='graphite_url').exists():
        graphite_url = Configuration.objects.get(name='graphite_url').value
        graphite_user = Configuration.objects.get(name='graphite_user').value
        graphite_password = Configuration.objects.get(
            name='graphite_pass').value

        test = Test.objects.get(id=test_id)
        world_id = ""

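        # Graphite expects HH:MM_YYYYMMDD; stored times are epoch milliseconds,
        # shifted here by one hour (3600 s).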
        start_time = datetime.datetime.fromtimestamp(
            test.start_time / 1000 + 3600).strftime("%H:%M_%Y%m%d")
        end_time = datetime.datetime.fromtimestamp(
            test.end_time / 1000 + 3600).strftime("%H:%M_%Y%m%d")

        gc = graphiteclient.GraphiteClient(graphite_url, graphite_user,
                                           str(graphite_password))

        for parameter in test.parameters:
            if 'MONITOR_HOSTS' in parameter:
                if parameter['MONITOR_HOSTS']:
                    hosts_for_monitoring = parameter['MONITOR_HOSTS'].split(
                        ',')
                    game_short_name = hosts_for_monitoring[0].split(".", 1)[1]
                    for server_name in hosts_for_monitoring:
                        if not Server.objects.filter(
                                server_name=server_name).exists():
                            server = Server(server_name=server_name)
                            server.save()
                        else:
                            server = Server.objects.get(
                                server_name=server_name)
                        server_name = server_name.replace('.', '_').replace(
                            '_ig_local', '')
                        logger.info(
                            'Trying to get monitoring data for: {}'.format(
                                server_name))
                        query = 'aliasSub(stacked(asPercent(nonNegativeDerivative(groupByNode(servers.{' + server_name + '}.system.cpu.{user,system,iowait,irq,softirq,nice,steal},4,"sumSeries")),nonNegativeDerivative(sum(servers.' + server_name + '.system.cpu.{idle,time})))),".*Derivative\((.*)\),non.*","CPU_\\1")'
                        results = gc.query(
                            query,
                            start_time,
                            end_time,
                        )
                        data = {}
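                        # Pivot Graphite datapoints into one row per timestamp.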
                        for res in results:
                            metric = res['target']
                            for p in res['datapoints']:
                                ts = str(datetime.datetime.fromtimestamp(p[1]))
                                if ts not in data:
                                    t = {}
                                    t['timestamp'] = ts
                                    t[metric] = p[0]
                                    data[ts] = t
                                else:
                                    t = data[ts]
                                    t[metric] = p[0]
                                    data[ts] = t
                        ServerMonitoringData.objects.filter(
                            server_id=server.id,
                            test_id=test.id,
                            source='graphite').delete()
                        for d in data:
                            server_monitoring_data = ServerMonitoringData(
                                test_id=test.id,
                                server_id=server.id,
                                data=data[d],
                                source='graphite')
                            server_monitoring_data.save()
            if 'WORLD_ID' in parameter:
                world_id = parameter['WORLD_ID']

        if world_id:
            webservers_mask = '{}w*_{}'.format(world_id, game_short_name)
        else:
            webservers_mask = 'example'

        if not ProjectGraphiteSettings.objects.filter(
                project_id=test.project_id,
                name='gentime_avg_request').exists():
            query = 'alias(avg(servers.' + webservers_mask + '.software.gentime.TimeSiteAvg),"avg")'
            ProjectGraphiteSettings(project_id=test.project_id,
                                    name='gentime_avg_request',
                                    value=query).save()
        if not ProjectGraphiteSettings.objects.filter(
                project_id=test.project_id,
                name='gentime_median_request').exists():
            query = 'alias(avg(servers.' + webservers_mask + '.software.gentime.TimeSiteMed),"median")'
            ProjectGraphiteSettings(project_id=test.project_id,
                                    name='gentime_median_request',
                                    value=query).save()
        if not ProjectGraphiteSettings.objects.filter(
                project_id=test.project_id,
                name='gentime_req_per_sec_request').exists():
            query = 'alias(sum(servers.' + webservers_mask + '.software.gentime.SiteReqPerSec),"rps")'
            ProjectGraphiteSettings(project_id=test.project_id,
                                    name='gentime_req_per_sec_request',
                                    value=query).save()
        if webservers_mask != 'example':
            query = ProjectGraphiteSettings.objects.get(
                project_id=test.project_id, name='gentime_avg_request').value
            results = gc.query(
                query,
                start_time,
                end_time,
            )
            # Fetch the median and RPS series separately and append them to results.
            query = ProjectGraphiteSettings.objects.get(
                project_id=test.project_id,
                name='gentime_median_request').value
            results_median = gc.query(
                query,
                start_time,
                end_time,
            )
            results.append(results_median[0])

            query = ProjectGraphiteSettings.objects.get(
                project_id=test.project_id,
                name='gentime_req_per_sec_request').value
            results_rps = gc.query(
                query,
                start_time,
                end_time,
            )
            results.append(results_rps[0])
            data = {}
            for res in results:
                metric = res['target']
                for p in res['datapoints']:
                    ts = str(datetime.datetime.fromtimestamp(p[1]))
                    if ts not in data:
                        t = {}
                        t['timestamp'] = ts
                        t[metric] = p[0]
                        data[ts] = t
                    else:
                        t = data[ts]
                        t[metric] = p[0]
                        data[ts] = t
            TestData.objects.filter(test_id=test.id,
                                    source='graphite').delete()
            for d in data:
                test_data = TestData(test_id=test.id,
                                     data=data[d],
                                     source='graphite')
                test_data.save()
    else:
        logger.info('Skipping update of graphite data')
    return True
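
The `graphiteclient.GraphiteClient` used above is not shown. The code assumes `query(target, start, end)` returns Graphite render-API JSON: a list of `{'target': ..., 'datapoints': [[value, timestamp], ...]}` dicts. A minimal sketch of such a client; the class and method names follow the usage above, and the request parameters follow Graphite's public render API:

    import requests

    class GraphiteClient(object):
        def __init__(self, url, user, password):
            self.url = url.rstrip('/')
            self.auth = (user, password)

        def query(self, target, start, end):
            # Graphite render API: /render?target=...&from=...&until=...&format=json
            resp = requests.get(self.url + '/render',
                                params={'target': target,
                                        'from': start,
                                        'until': end,
                                        'format': 'json'},
                                auth=self.auth)
            resp.raise_for_status()
            return resp.json()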