Example #1
def main():
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    analyzer = ProcessMiningAnalyzer(project_info)
    # analyzer.ftype_count()
    # analyzer.frequent_pattern()
    analyzer.neighbor_selection()
Example #2
import json


def main():
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)
    for proj in project_info:
        metric = MetricGithub(proj, tokens)
        print(metric.metrics())
Example #3
import json
import os


def main():
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)
    analyzer = IntegrationAnalyzer(tokens, project_info)
    # print(len(analyzer.builds(project_info[0])))
    # Warm-start from any cached results; create the cache directory first so
    # the os.listdir calls below cannot raise FileNotFoundError.
    os.makedirs('cache', exist_ok=True)
    data_cache, log_info = {}, {}
    if 'trend_data.json' in os.listdir('cache'):
        with open('cache/trend_data.json', 'r') as f_in:
            data_cache = json.load(f_in)
    if 'log_info.json' in os.listdir('cache'):
        with open('cache/log_info.json', 'r') as f_in:
            log_info = json.load(f_in)
    try:
        for proj in project_info:
            if proj['ID'] in data_cache and proj['ID'] in log_info:
                print('Skip Project {}'.format(proj['ID']))
                continue
            print('Processing Project {}'.format(proj['ID']))
            data = analyzer.trend(proj, reload=False)
            data_cache[proj['ID']] = data
            log_info[proj['ID']] = data['log_info']
    finally:
        # Persist whatever completed, even if an iteration raised.
        with open('cache/trend_data.json', 'w') as f_out:
            json.dump(data_cache, f_out)
        with open('cache/log_info.json', 'w') as f_out:
            json.dump(log_info, f_out)
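
Across these examples, ProjectInfo behaves as an iterable, indexable collection of per-project records addressed by CSV column name (proj['ID'] above, project_info[0] in the commented-out line). The real class ships with the repository; a minimal stand-in written against just that observed usage, assuming the sheet's first row holds column headers, could look like this (ProjectInfoStub is a hypothetical name):

import csv


class ProjectInfoStub:
    """Hypothetical stand-in mirroring how the examples use ProjectInfo."""

    def __init__(self, csv_path, name):
        self.name = name  # e.g. 'proj-info'; its role is not visible here
        with open(csv_path, newline='') as f_in:
            self._rows = list(csv.DictReader(f_in))  # one dict per project

    def __iter__(self):
        return iter(self._rows)  # supports `for proj in project_info`

    def __getitem__(self, index):
        return self._rows[index]  # supports `project_info[0]`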
Example #4
import json

from tqdm import tqdm


def main():
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)
    for proj in tqdm(project_info):
        metric = MetricTracker(proj, token=tokens)
        metric.metrics()
Example #5
import json


def main():
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    iteration_grading = IterationGrading('data/', 'detailed')
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)
    analyzer = MetricComparisonAnalyzer(tokens, project_info,
                                        iteration_grading)
    # analyzer.comparison(project_info[0])
    analyzer.correlation()
Example #6
import json

from tqdm import tqdm


def main():
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)
    analyzer = TestAnalyzer(tokens, project_info)
    for proj in tqdm(project_info):
        # analyzer.cucumber_scenarios(proj)
        analyzer.lifecycle(proj)
Example #7
import json


def main():
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    analyzer = PivotalTrackerAnalyzer(project_info,
                                      tokens['pivotal_tracker']['token'])
    # analyzer.story_assign_plot()
    analyzer.iteration_points()
Example #8
import json


def main():
    with open('conf/tokens.json', 'r') as f_in:
        token = json.load(f_in)
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    analyzer = GithubAnalyzer(token['github']['token'], project_info)
    # analyzer.commits_plot()
    # analyzer.commits_per_student_plot()
    analyzer.iteration_commits()
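
None of the examples show conf/tokens.json itself; the only keys they read are tokens['github']['token'] (this example and #10) and tokens['pivotal_tracker']['token'] (#7, #9, #10). A sketch that writes a matching placeholder template; the real file may well carry additional credentials:

import json

# Placeholder template covering exactly the keys the examples read.
template = {
    'github': {'token': 'YOUR_GITHUB_TOKEN'},
    'pivotal_tracker': {'token': 'YOUR_PIVOTAL_TRACKER_TOKEN'},
}
with open('conf/tokens.json', 'w') as f_out:
    json.dump(template, f_out, indent=2)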
Example #9
import json


def main():
    pr_data = PeerReview('data/', 'peer_combined')
    pr_analyzer = PeerReviewAnalyzer(pr_data)
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)
    proj_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                            'proj-info')
    pt_analyzer = PivotalTrackerAnalyzer(proj_info,
                                         tokens['pivotal_tracker']['token'])
    analyzer = PtPrComparisonAnalyzer(pr_analyzer, pt_analyzer)
    # analyzer.generate_student_map()
    analyzer.consistency_plot()
Example #10
import json


def main():
    with open('conf/tokens.json', 'r') as f_in:
        tokens = json.load(f_in)

    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    peer_review = PeerReview('data/', 'peer_combined')

    gt_analyzer = GithubAnalyzer(tokens['github']['token'], project_info)
    pt_analyzer = PivotalTrackerAnalyzer(project_info,
                                         tokens['pivotal_tracker']['token'])
    pr_analyzer = PeerReviewAnalyzer(peer_review)

    analyzer = CombinedAnalyzer(gt_analyzer=gt_analyzer,
                                pt_analyzer=pt_analyzer,
                                pr_analyzer=pr_analyzer)
    # analyzer.workload_correlation_plot(w_type='num_commits_normalized')
    analyzer.workload_correlation_plot(w_type='file_edit_normalized')
    # analyzer.workload_correlation_plot(w_type='line_edit')
    analyzer.prediction('file_edit_normalized')
Example #11
def main():
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    analyzer = ProcessSegmentAnalyzer(project_info)
    # analyzer.git_commit_overlaps()
    analyzer.story_time_overlaps()
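
Every main() above repeats the same skeleton: load the project sheet, usually read conf/tokens.json, construct one analyzer, and invoke it. A hedged sketch of that shared setup; run_analysis and make_analyzer are hypothetical names, and a factory callable is used because constructor argument order varies from analyzer to analyzer:

import json


def run_analysis(make_analyzer, with_tokens=True):
    # The course roster every example starts from.
    project_info = ProjectInfo('data/CS 169 F16 Projects - Sheet1.csv',
                               'proj-info')
    tokens = None
    if with_tokens:
        with open('conf/tokens.json', 'r') as f_in:
            tokens = json.load(f_in)
    # Delegate construction: e.g. TestAnalyzer takes (tokens, project_info)
    # while PivotalTrackerAnalyzer takes (project_info, token).
    return make_analyzer(project_info, tokens)


# e.g. run_analysis(lambda info, tok: TestAnalyzer(tok, info))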