# Raw per-response CSV writer; QUOTE_ALL because API payloads routinely
# contain commas and embedded newlines.
raw_writer = csv.DictWriter(raw_output, fieldnames=raw_fieldnames,
                            quoting=csv.QUOTE_ALL)

# Write csv headers for both files
summary_writer.writeheader()
raw_writer.writeheader()

# Hit the UNASSIGNED API once per company, NUM_RUNS times each: every raw
# response becomes a row in the raw CSV, and each company gets one summary
# row after its runs complete.
# enumerate replaces the hand-rolled company_count counter; iterating
# .items() avoids re-looking-up company_names[name] on every access.
for company_count, (name, company) in enumerate(company_names.items(), start=1):
    endpoint = '/{}?statistics=name,fan_count,description,website,posts{{id,type,message,created_time,likes,comments}}&start_date=2018-01-01&end_date=2018-02-01'.format(
        company['fbid'])
    tokeniser = '&access_token={}'.format(UNASSIGNED_TOKEN)
    url = UNASSIGNED_ROOT + endpoint + tokeniser
    t = Test(company['code'], name, UNASSIGNED_ROOT, endpoint)
    for run_count in range(1, NUM_RUNS + 1):
        # Print progress BEFORE the request (matches the other benchmark
        # loop in this file) so a slow/hung call is visible immediately.
        print("COMPANY {}/{} | {} - RUN: {}/{} | {}".format(
            company_count, len(company_names), name, run_count, NUM_RUNS,
            endpoint))
        # timeout so one unresponsive endpoint cannot hang the entire
        # benchmark run indefinitely (requests has no default timeout).
        r = requests.get(url, timeout=30)
        if r:  # Response truthiness: True only for status codes < 400
            # print(json.dumps(r.json(), indent=2))
            t.responses.append(r)
            raw_writer.writerow(t.gen_response_row(r))
    # One summary row per company, covering all of its runs.
    summary_writer.writerow(t.summarise(check_ok_status=True))
# Raw per-response CSV writer; QUOTE_ALL because API payloads routinely
# contain commas and embedded newlines.
raw_writer = csv.DictWriter(raw_output, fieldnames=raw_fieldnames,
                            quoting=csv.QUOTE_ALL)

# Write csv headers for both files
summary_writer.writeheader()
raw_writer.writeheader()

# Hit the POSITIVE_VIBES API (no access token required) once per company,
# NUM_RUNS times each; raw responses and a per-company summary are written
# to their respective CSVs.
# enumerate replaces the hand-rolled company_count counter; iterating
# .items() avoids re-looking-up company_names[name] on every access.
for company_count, (name, company) in enumerate(company_names.items(), start=1):
    endpoint = '/company/{}?includePosts=true&startDate=2018-01-01&endDate=2018-02-01'.format(
        company['fbid'])
    url = POSITIVE_VIBES_ROOT + endpoint
    t = Test(company['code'], name, POSITIVE_VIBES_ROOT, endpoint)
    for run_count in range(1, NUM_RUNS + 1):
        print("COMPANY {}/{} | {} - RUN: {}/{} | {}".format(
            company_count, len(company_names), name, run_count, NUM_RUNS,
            endpoint))
        # timeout so one unresponsive endpoint cannot hang the entire
        # benchmark run indefinitely (requests has no default timeout).
        r = requests.get(url, timeout=30)
        if r:  # Response truthiness: True only for status codes < 400
            # print(json.dumps(r.json(), indent=2))
            t.responses.append(r)
            raw_writer.writerow(t.gen_response_row(r))
    # One summary row per company; this API's status is not strictly
    # checked (check_ok_status=False), unlike the UNASSIGNED run.
    summary_writer.writerow(t.summarise(check_ok_status=False))