Ejemplo n.º 1
0
def NationalBaseTest(page_urls, html, html2=""):
    """ All the Branch IO Test scripts have the same base test set for National.
        This function tests the Station Name for a given list of page urls. This
        function accepts 3 arguments:
        1) The list of page_urls to test
        2) The HTML page under test (example: the article_url or the contest_url)
        3) A second HTML page under test which is optional (example: the script
        for section fronts tests both the primary section front url and the secondary
        section front url at the same time).

        Returns the number of URL sets whose station-name tag check failed.
    """
    # Expected Station Name (National pages do not have an API to pull this from)
    station_name = "sanitized-url"

    # Counters for tallying test results
    total_calls = 0
    failed_national_calls = 0

    # Start a loop to go through the given page_urls list
    for url in page_urls:
        total_calls += 1

        html_url1 = url[html]  # <-- URL is given to us in the page_urls list
        url1_page_data = functions.call_url(html_url1)[:PAGE_BUFFER]

        # Some scripts have 2 HTML URLs to verify against. Some have only one.
        # (truthiness test instead of comparing against "")
        if html2:
            html_url2 = url[html2]
            url2_page_data = functions.call_url(html_url2)[:PAGE_BUFFER]
            status = test.runTagCheckDouble(station_name, url1_page_data,
                                            url2_page_data,
                                            "branch:deeplink:station_name")
        else:
            status = test.runTagCheck(station_name, url1_page_data,
                                      "branch:deeplink:station_name")

        # Idiomatic truthiness check instead of "status == False"
        if not status:
            failed_national_calls += 1

    # Printing test results
    print("\n-------NATIONAL BASE TEST RESULTS----------")
    print("Total National Calls Failed: %d of %d" %
          (failed_national_calls, total_calls))

    # Return how many total URL failures there were
    return failed_national_calls
Ejemplo n.º 2
0
# Running tally of stations processed
total_calls = 0

# Loop through each Station using the ID in the list
# NOTE(review): assumes id_list / functions are defined earlier in the full script
for item in id_list:
    total_calls += 1
    tests_failed = 0

    station_id = item["id"]  #<-- Given in the json file

    # Build URLs for the testing
    api_url = "sanitized" % station_id
    meta_title_url = "sanitized" % station_id
    meta_description_url = "sanitized" % station_id

    # Get the station data from the Core API
    station_data_text = functions.call_url(api_url)
    station_data = json.loads(station_data_text)
    station_dictionary = station_data["data"]

    # Pull out the important Core API variables
    station_name = station_dictionary["attributes"]["name"]
    station_slogan = station_dictionary["attributes"]["slogan"]
    site_slug = station_dictionary["attributes"]["site_slug"]

    # Build the expected title from the Core API data. Core API is the "source
    # of truth" for these tests.
    # If station_slogan is None...or empty....the expected title changes.
    # ("is None" identity test instead of "== None", per PEP 8)
    if station_slogan is None or station_slogan == "":
        expected_title = 'sanitized' % station_name
    else:
        expected_title = 'sanitized' % (station_name, station_slogan)
# URLS - listed in a json file
# NOTE(review): assumes LIBRARY_PATH, total_calls and functions are defined
# earlier in the full script.
with open(LIBRARY_PATH + "/migratedStationUrls.json") as file:
    station_urls = json.load(file)

# Cycle through each of the Stations and run the tests
for url in station_urls:
    total_calls += 1
    tests_failed = 0

    # API endpoint and production HTML page for this station
    api_url = url["api_url"]
    html_url = url["prod_url"]
    # html_url = url["preprod_url"]  # <-- swap in to test the pre-prod page instead

    # Get Station data from the API
    station_data_text = functions.call_url(api_url)
    station_data = json.loads(station_data_text)

    # ---------------------------
    # Variable setup section
    # ---------------------------

    # Separate out the information we want into variables
    station_dictionary = station_data["data"]
    station_id = station_dictionary["id"]
    station_description = station_dictionary["attributes"]["description"]
    station_slogan = station_dictionary["attributes"]["slogan"]
    station_market = station_dictionary["attributes"]["market"]["display_name"]
    station_name = station_dictionary["attributes"]["name"]
    station_logo = station_dictionary["attributes"]["square_logo_small"]
Ejemplo n.º 4
0
def singleTest(page_urls):
    """ This is the test for all the Stage only AdOps NMC Tag verification scripts.
        This function accepts 1 argument: page_urls. This function returns the
        number of pages failed so that the main test script can report to Google Sheets.

        A page counts as passed only when BOTH the nmc:pid check and the
        nmc:tag check succeed.
    """

    # Per-check failure tallies
    failed_pid_test = 0
    failed_tag_test = 0

    # Whole-page tallies
    pages_passed = 0
    pages_failed = 0
    total_calls = 0

    for url in page_urls:
        tests_passed_for_this_page = 0
        total_calls += 1

        stg_url = url["url"]
        # Upper-case expected values so the comparisons are case-insensitive
        expected_pid = url["expected_pid"].upper()
        expected_tag = url["expected_tag"].upper()

        # Call for the web page data once and use the data for the tests
        web_page_data = functions.call_url(stg_url)[:PAGE_BUFFER]

        ###################################################
        # TEST 1 of 2: Compare HTML nmc:pid with expected
        ###################################################

        # Send web page data to AdOps_HTML_NMCTagTest
        # Check to see if it matches the expected value
        # Get back true/false
        status = runTest.test_nmc_pid_single(web_page_data, expected_pid)

        # Truthiness check instead of "status == True"
        if status:
            message = "   TEST 1 of 2: %s\n" % PASSED
            tests_passed_for_this_page += 1
        else:
            message = "   TEST 1 of 2: %s\n" % FAILED
            failed_pid_test += 1
        functions.show_message(message)

        ###################################################
        # TEST 2 of 2: Compare HTML nmc:tag with expected
        ###################################################

        # Send web page data to AdOps_HTML_NMCTagTest
        # Check to see if it matches the expected value
        # Get back true/false
        status = runTest.test_nmc_tag_single(web_page_data, expected_tag)

        if status:
            message = "   TEST 2 of 2: %s\n" % PASSED
            tests_passed_for_this_page += 1
        else:
            message = "   TEST 2 of 2: %s\n" % FAILED
            failed_tag_test += 1
        functions.show_message(message)

        # Print and tally results before going to the next url for testing
        print("\n****\nThis page Passed = %d of 2\n****\n" %
              tests_passed_for_this_page)
        if tests_passed_for_this_page == 2:
            pages_passed += 1
        else:
            pages_failed += 1

    # Printing test results
    print("\n\n-------RESULTS----------")
    print("%d of %d pages PASSED" % (pages_passed, total_calls))
    print("----------------------------")
    print("%d of %d NMC:PID Tests FAILED" % (failed_pid_test, total_calls))
    print("%d of %d NMC:TAG Tests FAILED" % (failed_tag_test, total_calls))

    # Return the failure count (redundant parentheses removed)
    return pages_failed
import functions, FillGoogleSheetWithTestResults

# Initialize the logger
# NOTE(review): LOG_FILE / LOG_FORMAT are expected to be defined earlier in
# the full script.
logging.basicConfig(filename=LOG_FILE,
                    level=logging.INFO,
                    format=LOG_FORMAT,
                    filemode="w")
logger = logging.getLogger()
logger.info(
    "Logging Started =================================================")

# Podcast API URL
podcast_api_url = "sanitized"

# Get all of the podcast data from the API as a list of dictionaries
podcast_data_text = functions.call_url(podcast_api_url)
podcast_data = json.loads(podcast_data_text)  # <-- List of dictionaries

# Variables
# Per-field failure tallies plus overall podcast/call counters
failed_title_calls = 0
failed_description_calls = 0
failed_image_calls = 0
podcasts_failed = 0
total_calls = 0
# Starting the for loop to check data for each podcast
for podcast_dictionary in podcast_data["data"]:
    total_calls += 1
    tests_passed_for_this_podcast = 0

    # Pull out the important API variables
    if param is None:
        message = "%s -> %s is of type None" % (FAILED, descriptor)
        functions.show_message(message)
        return True
    else:
        message = "%s -> %s is: %s" % (PASSED, descriptor, param)
        functions.show_message(message)
        return False


# ---- End Functions section -----

# Set up for Podcast data
podcast_api_url = "sanitized"
# Get the Podcast API data
podcast_data_text = functions.call_url(podcast_api_url)
podcast_data = json.loads(podcast_data_text)

# Variables
# Counters: podcasts seen, fields found empty, episodes seen
podcast_title_count = 0
total_empty = 0
total_eps = 0

# Starting the loop to grab a Podcast
for podcast_dictionary in podcast_data["data"]:
    podcast_title_count += 1

    # Create a URL like this one:
    # sanitized
    # site_slug identifies the podcast's page path on the site
    podcast_site_slug = podcast_dictionary["attributes"]["site_slug"]
    base_url = "sanitized"
Ejemplo n.º 7
0
    else:
        message = "%s -> %s is: %s" % (PASSED, descriptor, param)
        functions.show_message(message)
        return False


# ---- End Functions section -----

# Go here and verify 200 OK
# sanitized
episode_url_base = "sanitized"
podcast_filter = "sanitized"
# Combine the base and the filter into the final episode-listing API URL
podcast_api_url = "sanitized" % (episode_url_base, podcast_filter)

# Get the podcast data
podcast_data_text = functions.call_url(podcast_api_url)
podcast_data = json.loads(podcast_data_text)

# Message for troubleshooting
message = "Found %d Podcast Episodes in the API.\n" % len(podcast_data["data"])
functions.show_message(message)

# Variables
# Tally of episodes inspected, and one counter per missing-field kind
title_count = 0
empty_title = 0
empty_image = 0
empty_pub_date = 0
empty_duration = 0
# Starting the for loop to check each podcast episode
for podcast_dictionary in podcast_data["data"]:
Ejemplo n.º 8
0
    if code != 200:
        functions.show_message("   %s. %s returned %d \n" %
                               (FAILED, descriptor, code))
        return False
    else:
        functions.show_message("   %s, %s returned 200 \n" %
                               (PASSED, descriptor))
        return True


# ---- End Functions section -----

# Set up for Station data
core_api_url = "sanitized"
# Get the API data - used for finding station site slugs
station_data_text = functions.call_url(core_api_url)
station_data = json.loads(station_data_text)

# Set up for Podcast data
podcast_api_url = "sanitized"
# Get the podcast API data
podcast_data_text = functions.call_url(podcast_api_url)
podcast_data = json.loads(podcast_data_text)

# Variables
# Failure counters for the two pages tested per podcast, plus totals
page1_failed_calls = 0
page2_failed_calls = 0
total_calls = 0
total_podcasts = 0

# Starting the for loop to check data for each podcast
Ejemplo n.º 9
0
WARNING     = "\033[33mWARNING\033[0m" # \___ Linux-specific colorization
FAILED      = "\033[31mFAILED\033[0m"  # /
ERROR       = "\033[31mERROR\033[0m"   #/
PAGE_BUFFER = 8192  # <-- only the first 8 KB of each fetched page is kept

# Custom library imports
sys.path.append(LIBRARY_PATH)
import functions, FillGoogleSheetWithTestResults

# Initialize the logger
# NOTE(review): LOG_FILE / LOG_FORMAT are expected to be defined earlier.
logging.basicConfig(filename=LOG_FILE, level=logging.INFO, format=LOG_FORMAT, filemode="w")
logger = logging.getLogger()
logger.info("Logging Started =================================================")

# Page under test; truncate the response to PAGE_BUFFER bytes
page_url = "sanitized"
page_data = functions.call_url(page_url)[:PAGE_BUFFER]

# ------------------------
# Test section
# ------------------------
title_failed = 0
desc_failed = 0

# Title should exist and be exactly like this:
# sanitized
print("   TEST 1: Title Check")

# Upper-case the expected title so the comparison is case-insensitive
expected_title = "sanitized"
expected_title = expected_title.upper()

# Function returns string in upper case
Ejemplo n.º 10
0
def baseTest(page_urls, html, html2=""):
    """ All the Branch IO Test scripts have the same base test set for Station URLs.
        This function tests the Station Market, Logo, Name, ID, Category, and Genre
        Name for a given list of page urls. This function accepts 3 arguments:
        1) The list of page_urls to test
        2) The HTML page under test (example: the article_url or the contest_url)
        3) A second HTML page under test which is optional (example: the script
        for section fronts tests both the primary section front url and the secondary
        section front url at the same time).

        Returns the number of URL sets that failed at least one of the 6 tests.
    """
    # Counters for tallying test results
    total_calls = 0
    pages_passed = 0
    pages_failed = 0

    # Per-test failure tallies across all URL sets
    marketFail = 0
    logoFail = 0
    nameFail = 0
    idFail = 0
    categoryFail = 0
    genreFail = 0

    # Start a loop to go through the given page_urls list
    for url in page_urls:
        total_calls += 1

        #####################################
        #  API Variable SECTION
        #####################################

        api_url = url[
            "api_url"]  # <-- URL is given to us in the page_urls list

        # Get all of the station data from the core API as a list of dictionaries
        station_data_text = functions.call_url(api_url)
        station_data = json.loads(
            station_data_text)  # <-- List of dictionaries
        station_dictionary = station_data[
            "data"]  # <-- We ALWAYS want variables from the data section

        # Set variables for API comparison testing later
        station_id = station_dictionary["id"]
        station_market = station_dictionary["attributes"]["market"][
            "display_name"]
        station_name = station_dictionary["attributes"]["name"]
        station_logo = station_dictionary["attributes"]["square_logo_small"]

        # Category is a LIST (of ONLY ONE dictionary) that is nested inside of "attributes"
        station_category = station_dictionary["attributes"]["category"]

        # Genre is a LIST (of AT LEAST ONE dictionary) that is nested inside of "attributes"
        station_genre = station_dictionary["attributes"]["genre"]

        # Check if there is more than one dictionary in this list
        # Separate out the genre names from the other dictionary values
        if len(station_genre) > 1:
            message = "Station has more than one genre dictionary in the list!"
            functions.show_message(message)
            # Collect every genre name so they can all be verified later
            station_genre_name = [
                item_dict["name"] for item_dict in station_genre
            ]
            print("There is more than one genre name!! API returns: %s" %
                  station_genre_name)
        else:
            station_genre_name = station_genre[0]["name"]

        # If any variables are empty, log them
        # NOTE(review): these helpers' return values REPLACE the station
        # values -- confirm in the functions module that they return the
        # original value and not just a bool.
        station_market = functions.isNoneOrEmpty(station_market,
                                                 "Station Market")
        station_logo = functions.isNoneOrEmpty(station_logo, "Station Logo")
        station_name = functions.isNoneOrEmpty(station_name, "Station Name")
        station_category = functions.isCategoryOrGenreEmpty(
            station_category, "Station Category")
        station_genre = functions.isCategoryOrGenreEmpty(
            station_genre, "Station Genre")
        station_id = functions.isIDEmpty(station_id)

        # Check if Genre Name is a list (the list we made earlier or just a single value)
        # Make uppercase for comparison testing
        if isinstance(station_genre_name, list):
            message = "The API Genre is a list."
            station_genre_name = [item.upper() for item in station_genre_name]
        else:
            message = "The API Genre is a single value."
            station_genre_name = station_genre_name.upper()
        functions.show_message(message)

        # Check if category (the value not the section) is a list
        # Make uppercase for comparison testing
        if isinstance(station_category, list):
            message = "The API Category is a list."
            station_category = [item.upper() for item in station_category]
        else:
            message = "The API Category is a single value."
            station_category = station_category.upper()
        functions.show_message(message)

        #####################################
        #  HTML Variable SECTION
        #####################################

        html_url1 = url[html]  # <-- URL is given to us in the page_urls list
        url1_web_page_data = functions.call_url(html_url1)[:PAGE_BUFFER]

        # Some scripts have 2 HTML URLs to verify against. Some have only one.
        if html2:
            html_url2 = url[html2]
            url2_web_page_data = functions.call_url(html_url2)[:PAGE_BUFFER]
        else:
            url2_web_page_data = ""

        #####################################
        #  TEST SECTION
        #####################################

        # Pre-set the conditional results so an unexpected category/genre type
        # (neither list nor str) counts as a test failure instead of raising
        # a NameError at the tally below.
        category_test = False
        genre_test = False

        # Tests with 1 HTML URL
        if url2_web_page_data == "":
            market_test = test.runTagCheck(station_market, url1_web_page_data,
                                           "branch:deeplink:market")
            logo_test = test.runTagCheck(station_logo, url1_web_page_data,
                                         "branch:deeplink:station_logo")
            name_test = test.runTagCheck(station_name, url1_web_page_data,
                                         "branch:deeplink:station_name")
            id_test = test.runTagCheck(str(station_id), url1_web_page_data,
                                       "branch:deeplink:station_id")
            # If there is a list of categories to compare against
            if isinstance(station_category, list):
                category_test = test.runTagListCheck(
                    station_category, url1_web_page_data,
                    "branch:deeplink:category")
            elif isinstance(station_category, str):
                category_test = test.runTagCheck(station_category,
                                                 url1_web_page_data,
                                                 "branch:deeplink:category")
            # If there is a list of genre names to compare against
            if isinstance(station_genre_name, list):
                genre_test = test.runTagListCheck(station_genre_name,
                                                  url1_web_page_data,
                                                  "branch:deeplink:genre")
            elif isinstance(station_genre_name, str):
                genre_test = test.runTagCheck(station_genre_name,
                                              url1_web_page_data,
                                              "branch:deeplink:genre")

        # Tests with 2 HTML URLs
        else:
            market_test = test.runTagCheckDouble(station_market,
                                                 url1_web_page_data,
                                                 url2_web_page_data,
                                                 "branch:deeplink:market")
            logo_test = test.runTagCheckDouble(station_logo,
                                               url1_web_page_data,
                                               url2_web_page_data,
                                               "branch:deeplink:station_logo")
            name_test = test.runTagCheckDouble(station_name,
                                               url1_web_page_data,
                                               url2_web_page_data,
                                               "branch:deeplink:station_name")
            id_test = test.runTagCheckDouble(str(station_id),
                                             url1_web_page_data,
                                             url2_web_page_data,
                                             "branch:deeplink:station_id")
            # If there is a list of categories to compare against
            # (comment fixed -- previously a copy-paste of the genre comment)
            if isinstance(station_category, list):
                category_test = test.runTagListCheckDouble(
                    station_category, url1_web_page_data, url2_web_page_data,
                    "branch:deeplink:category")
            elif isinstance(station_category, str):
                category_test = test.runTagCheckDouble(
                    station_category, url1_web_page_data, url2_web_page_data,
                    "branch:deeplink:category")
            # If there is a list of genre names to compare against
            if isinstance(station_genre_name, list):
                genre_test = test.runTagListCheckDouble(
                    station_genre_name, url1_web_page_data, url2_web_page_data,
                    "branch:deeplink:genre")
            elif isinstance(station_genre_name, str):
                genre_test = test.runTagCheckDouble(station_genre_name,
                                                    url1_web_page_data,
                                                    url2_web_page_data,
                                                    "branch:deeplink:genre")

        #####################################
        #  TEST RESULTS SECTION
        #####################################

        # Counter for how many of the 6 tests that this set of urls failed
        urlFailCount = 0

        # Check if all are truthy
        if market_test and logo_test and name_test and id_test and category_test and genre_test:
            message = "\nAll tests passed for this URL set.\n\n"
            pages_passed += 1
        # If at least one is not True, gotta count what failed and how many of them
        else:
            pages_failed += 1  # <-- Increment the overall/total URL Failure count
            if not market_test:
                urlFailCount += 1
                marketFail += 1
            if not logo_test:
                urlFailCount += 1
                logoFail += 1
            if not name_test:
                urlFailCount += 1
                nameFail += 1
            if not id_test:
                urlFailCount += 1
                idFail += 1
            if not category_test:
                urlFailCount += 1
                categoryFail += 1
            if not genre_test:
                urlFailCount += 1
                genreFail += 1

        # Print and tally results before going to the next url for testing
        # NOTE(review): this overwrites the "All tests passed" message above,
        # so that message is never shown -- confirm whether that is intended.
        message = "\n%d of 6 Tests Failed!!\n\n" % urlFailCount
        functions.show_message(message)

    # Finally outside the for loop
    # Printing test results
    print("\n-------STATION BASE TEST RESULTS----------")
    print("%d of %d Total Calls PASSED" % (pages_passed, total_calls))
    print("----------------------------")
    print("%d of %d Market Tests FAILED" % (marketFail, total_calls))
    print("%d of %d Logo Tests FAILED" % (logoFail, total_calls))
    print("%d of %d Name Tests FAILED" % (nameFail, total_calls))
    print("%d of %d ID Tests FAILED" % (idFail, total_calls))
    print("%d of %d Category Tests FAILED" % (categoryFail, total_calls))
    print("%d of %d Genre Name Tests FAILED" % (genreFail, total_calls))
    print("-------STATION BASE TEST RESULTS----------\n\n")

    # Return how many total URL failures there were
    return pages_failed
Ejemplo n.º 11
0
PAGE_BUFFER = 8192  # <-- only the first 8 KB of each fetched page is kept

# Custom library imports
sys.path.append(LIBRARY_PATH)
import functions, FillGoogleSheetWithTestResults

# Initialize the logger
# NOTE(review): LOG_FILE / LOG_FORMAT are expected to be defined earlier.
logging.basicConfig(filename=LOG_FILE, level=logging.INFO, format=LOG_FORMAT, filemode="w")
logger = logging.getLogger()
logger.info("Logging Started =================================================")

# The Core API - Just grabbing the first 100 stations
api_url = "sanitized"

# Get all of the station data from the core API as a list of dictionaries
station_data_text = functions.call_url(api_url)
station_data = json.loads(station_data_text) # <-- List of dictionaries

# Variables
stations_failed = 0
total_calls = 0

# Starting the for loop to check each station's data content
for station_dictionary in station_data["data"]:
    total_calls += 1

    # Pull out the important Core API variables
    # (slug / name / slogan feed the checks later in the full script)
    site_slug = station_dictionary["attributes"]["site_slug"]
    station_name  = station_dictionary["attributes"]["name"]
    station_slogan = station_dictionary["attributes"]["slogan"]
# Running tally of URL pairs processed
total_calls = 0

# URLS - listed in a json file
with open(LIBRARY_PATH + "/migratedStationUrls.json") as file:
    station_urls = json.load(file)

# Body of Script
for url in station_urls:
    tests_passed_for_this_page = 0
    total_calls += 1

    # Each entry carries both a production and a pre-production URL
    prod_url = url["prod_url"]
    preprod_url = url["preprod_url"]

    # Call for the web page data once and use the data for the tests
    prod_web_page_data = functions.call_url(prod_url)[:PAGE_BUFFER]
    preprod_web_page_data = functions.call_url(preprod_url)[:PAGE_BUFFER]

    ###################################################
    # TEST 1 of 2: Compare HTML nmc:pid with expected
    # nmc:pid should be content=”homepage”
    ###################################################

    # Send web page data to AdOps_HTML_NMCTagTest
    # Check to see if they match the expected value
    # Get back true/false for prod and preprod
    # NOTE(review): expected_pid must be defined earlier in the full script
    prod_match, preprod_match = runTest.test_nmc_pid(prod_web_page_data,
                                                     preprod_web_page_data,
                                                     expected_pid)

    # Check to see if prod and preprod are both true
Ejemplo n.º 13
0
#-------------------------------------------------------------
# TEST 7: Compare Editorial Tags with Known Tags.
# Expecting tags: QA, Shana, test, Automation, backend
# Because tags may be in a different order, we are not able
# to use the functions in the StationBranchIOTagCheck script.
#-------------------------------------------------------------

# Using the same json file - same urls - we used for the other 6 tests
for section in station_urls:
    # We only want to test the article and gallery urls from the json file
    html_url1 = section["article_url"]
    html_url2 = section["gallery_url"]

    # Call for the web page data once and use the data for the test
    url1_web_page_data = functions.call_url(html_url1)[:PAGE_BUFFER]
    url2_web_page_data = functions.call_url(html_url2)[:PAGE_BUFFER]
    # Expected tags, upper-cased to match the helper's upper-case output
    expected_tags = ['AUTOMATION', 'BACKEND', 'TEST', 'QA', 'SHANA']

    print("   TEST: Editorial Tags Check")
    # Function returns string in upper case
    branch_io_tags_from_html_url1 = functions.get_meta_tag_content_list(url1_web_page_data, "", "branch:deeplink:editorial_tags")
    branch_io_tags_from_html_url2 = functions.get_meta_tag_content_list(url2_web_page_data, "", "branch:deeplink:editorial_tags")

    # Comparison time
    print("Comparing Expected Tags: %s" % expected_tags)
    print("With Article tags:       %s" % branch_io_tags_from_html_url1)
    print("And with Gallery tags:   %s" % branch_io_tags_from_html_url2)

    # Count of tag mismatches for this URL pair
    mismatch = 0
                    filemode="w")
logger = logging.getLogger()
logger.info(
    "Logging Started =================================================")

# Variables
# Containers for the updated group/feed payloads, plus a failure tally
editorial_group_data_new = {}
editorial_feeds_new = {}
failed = 0

# ------------------------------------------------
# Step 1: GET editorial group data from database
# ------------------------------------------------

editorial_group_url = "sanitized-url"
editorial_group_text = functions.call_url(editorial_group_url)
editorial_group_data_original = json.loads(editorial_group_text)

# For each entry item returned in the GET response
for entry in editorial_group_data_original:

    # ------------------------------------------------------------
    # Step 2: Get the Original Values specifically for the entry
    # with id = xxx.
    #
    # Note: The xxx entry was created for testing with this script
    # so that changing information doesn't influence other
    # editorial group testing.
    # ------------------------------------------------------------

    editorial_id = entry["id"]