Example #1
import os

from autowebcompat import utils


def get_inconsistencies():
    files = utils.get_all_images()

    parsed = {}
    for f in files:
        # File names look like '<bug-id>[_<sequence>]_<browser>.<ext>';
        # drop the extension and split on underscores.
        parts = os.path.splitext(f)[0].split('_')

        webcompatID = int(parts[0])
        if webcompatID not in parsed:
            parsed[webcompatID] = {}

        # The sequence number is optional; use -1 when it is absent.
        if len(parts) > 2:
            sequence = int(parts[-2])
        else:
            sequence = -1

        if sequence not in parsed[webcompatID]:
            parsed[webcompatID][sequence] = []
        parsed[webcompatID][sequence].append(parts[-1])

    incons = []
    for key, value in parsed.items():
        for sequence, browsers in value.items():
            # A screenshot captured by fewer than two browsers is an
            # inconsistency.
            if len(browsers) < 2:
                incons.append([
                    key, sequence, 'firefox' in browsers, 'chrome' in browsers
                ])

    # Sort by the Firefox-presence flag first, then by bug id.
    incons.sort(key=lambda x: (x[2], x[0]))
    return incons
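
Each entry returned above is a list of [bug id, sequence, has_firefox, has_chrome], with sequence -1 for screenshots that have no sequence number. A minimal consumption sketch, assuming get_inconsistencies() is importable:

# Print which browser screenshots exist for each inconsistent entry.
for bug_id, sequence, has_firefox, has_chrome in get_inconsistencies():
    print('Bug {} (sequence {}): firefox={}, chrome={}'.format(
        bug_id, sequence, has_firefox, has_chrome))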
Example #2
from autowebcompat import utils


def print_statistics(file_name, incons):
    n_incons = len(incons)
    files = utils.get_all_images()
    total_img = len(files)
    firefox = []
    chrome = []
    for line in incons:
        firefox.append(line[2])
        chrome.append(line[3])

    # The flags are 1 where the browser's screenshot exists, so
    # n_incons - sum(...) counts the screenshots missing per browser.
    incons_f = [int(x) for x in firefox]
    incons_c = [int(x) for x in chrome]
    print("Number of photos: {}".format(total_img))
    print("Number of pairs of images: {}".format(
        int((total_img - n_incons) / 2)))
    print("Number of pairs of images possible: {}".format(
        int((total_img - n_incons) / 2 + n_incons)))
    print("Percentage of Firefox inconsistencies: {}".format(
        int(((n_incons - sum(incons_f)) / n_incons) * 100)))
    print("Percentage of Chrome inconsistencies: {}".format(
        int(((n_incons - sum(incons_c)) / n_incons) * 100)))
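
A minimal sketch chaining the two helpers above. Note that print_statistics() never reads its file_name argument as written, so the name passed below is a hypothetical placeholder:

# 'stats.txt' is a placeholder; the argument is unused by print_statistics().
incons = get_inconsistencies()
print_statistics('stats.txt', incons)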
Example #3
from functools import lru_cache
import itertools
import random
from urllib.parse import urlparse

from autowebcompat import network, utils

bugs = utils.get_bugs()

utils.prepare_images()
all_images = utils.get_all_images()[:3000]  # Limit the sample to 3000 images.
image = utils.load_image(all_images[0])
input_shape = image.shape
BATCH_SIZE = 32
EPOCHS = 50

bugs_to_website = {}
for bug in bugs:
    bugs_to_website[bug['id']] = urlparse(bug['url']).netloc


@lru_cache(maxsize=len(all_images))
def site_for_image(image):
    # The file name starts with the bug id; map it to the bug URL's netloc.
    bug = image[:image.index('_')]
    return bugs_to_website[int(bug)]


def are_same_site(image1, image2):
    return site_for_image(image1) == site_for_image(image2)
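
A brief usage sketch: are_same_site() can filter out pairs of screenshots that come from the same website, and the lru_cache on site_for_image() keeps repeated lookups cheap. itertools is already imported above:

# Build all cross-site image pairs from the sample
# (~4.5 million candidate pairs for 3000 images).
cross_site_pairs = [
    (first, second)
    for first, second in itertools.combinations(all_images, 2)
    if not are_same_site(first, second)
]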

Example #4
import argparse
from functools import lru_cache
from urllib.parse import urlparse

from autowebcompat import network, utils

SAMPLE_SIZE = 3000  # Assumed value; the original snippet defines this elsewhere.

parser = argparse.ArgumentParser()
parser.add_argument('optimizer',
                    type=str,
                    choices=network.SUPPORTED_OPTIMIZERS,
                    help='Select the optimizer to use for training')
parser.add_argument('--early_stopping',
                    dest='early_stopping',
                    action='store_true',
                    help='Stop training when validation accuracy has stopped improving.')
args = parser.parse_args()

bugs = utils.get_bugs()

utils.prepare_images()
all_images = utils.get_all_images()[:SAMPLE_SIZE]
image = utils.load_image(all_images[0])
input_shape = image.shape

# Split the sample 80/10/10 into train, validation and test sets.
TRAIN_SAMPLE = 80 * (SAMPLE_SIZE // 100)
VALIDATION_SAMPLE = 10 * (SAMPLE_SIZE // 100)
TEST_SAMPLE = SAMPLE_SIZE - (TRAIN_SAMPLE + VALIDATION_SAMPLE)
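# Worked example: with SAMPLE_SIZE = 3000, TRAIN_SAMPLE = 80 * 30 = 2400,
# VALIDATION_SAMPLE = 10 * 30 = 300 and TEST_SAMPLE = 3000 - 2700 = 300,
# i.e. an 80/10/10 split. Any remainder from SAMPLE_SIZE % 100 ends up in
# the test set because of the floor division.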

bugs_to_website = {}
for bug in bugs:
    bugs_to_website[bug['id']] = urlparse(bug['url']).netloc


@lru_cache(maxsize=len(all_images))
def site_for_image(image):
    # The file name starts with the bug id; map it to the bug URL's netloc.
    bug = image[:image.index('_')]
    return bugs_to_website[int(bug)]
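
After parsing, the chosen optimizer and the early-stopping flag are available on args; a minimal sketch of consuming them:

# Both attributes come from the add_argument() calls above.
print('Training with optimizer:', args.optimizer)
if args.early_stopping:
    print('Early stopping is enabled')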