Example 1
def validation_check(verbose=0):
    """Generate captions for 100 images picked at random from the validation set."""
    val_captions = list()
    for i in range(100):
        # Pick a random index into the validation set
        idx = np.random.randint(0, len(filenames_val))
        c = generate_caption(image_dir + filenames_val[idx])
        val_captions.append(copy(c))
        if verbose:
            print_progress(i + 1, 100)
    return val_captions
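The captioning examples in this listing report progress through a print_progress(count, total) helper that is not defined in the snippets themselves (the parse_links examples further down call a one-argument variant). A minimal sketch of such a helper, assuming it simply overwrites one console line with a completion percentage, could look like this:

import sys

def print_progress(count, total):
    # Assumed helper: overwrite a single console line with the completion
    # percentage. 'count' is the number of items processed so far,
    # 'total' the overall number of items.
    pct = 100.0 * count / total
    sys.stdout.write("\r- Progress: {0:.1f}%".format(pct))
    sys.stdout.flush()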
Example 2
def create_word_LUT():
    """Build a lookup table mapping each word in the vocabulary (bow) to the
    indices of the training images whose captions contain that word."""
    word_lookup = list()
    for i in range(len(bow)):
        word = bow[i]['word']
        indices = images_with_word(captions_train, word)

        entry = {'word': word, 'indices': indices}
        word_lookup.append(copy(entry))
        print_progress(i + 1, len(bow))
    return word_lookup
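If the table is queried many times, it may be worth indexing it by word. A small follow-up sketch, assuming create_word_LUT() has already run and that the example word is hypothetical:

word_lookup = create_word_LUT()
# Index the lookup table by word for constant-time access
word_to_indices = {entry['word']: entry['indices'] for entry in word_lookup}
# e.g. word_to_indices.get('dog', []) would return the training-image
# indices whose captions contain the (hypothetical) word 'dog'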
Example 3
def bulk_generation_KNN():
    num_test_images = np.shape(filenames_test)[0]
    generated_captions = list()
    for i in range(num_test_images):
        image_name = filenames_test[i]
        # Workaround for a problematic test image at index 884:
        # fall back to the previous image in the list.
        if i == 884:
            image_name = filenames_test[883]
        caption = KNN_score(image_name)
        generated_captions.append(caption)
        print_progress(i + 1, num_test_images)
    return generated_captions
Example 4
def generate_captions_testset(alpha):
    print("Generating captions for alpha =", alpha)
    generated_captions = list()
    for i in range(len(transfer_values_test)):
        transfer_value = transfer_values_test[i]
        generated_caption = generate_caption_reference(transfer_value,
                                                       alpha=alpha)
        generated_captions.append(generated_caption)
        print_progress(i + 1, len(transfer_values_test))
    filename = 'generated_captions_reference_alpha_' + str(alpha) + '.json'
    with open(filename, 'w') as f:
        json.dump(generated_captions, f)
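A typical way to use the function above is to sweep a few alpha values, writing one JSON file per value; the grid below is only illustrative:

# Hypothetical alpha grid; each call writes
# 'generated_captions_reference_alpha_<alpha>.json' as in the function above.
for alpha in [0.0, 0.25, 0.5, 0.75, 1.0]:
    generate_captions_testset(alpha)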
Example 5
        s4 = sentence_bleu(references[i],
                           candidate_tokenized,
                           weights=[0.25, 0.25, 0.25, 0.25],
                           smoothing_function=chencherry.method1)

        # First approach: the best sentence is the one with the best
        # BLEU sum
        bleuSum = s1 + s2 + s3 + s4
        bleuSums.append(bleuSum)
    bestIdx = np.argmax(bleuSums)
    bestCaption = beamCaptions[i][bestIdx]['sentence']
    # Drop the first element of the chosen sentence (e.g. a leading start token)
    bestCaptions.append(bestCaption[1:])
    idxList.append(bestIdx)
    # Report progress
    print_progress(i + 1, num_samples)

with open(out_dir + 'absolute_best.json', 'w') as outFile:
    json.dump(bestCaptions, outFile)

# SECOND METHOD
# The best caption is the one with the best consensus score

# VGG16 consensus

print('\nChoosing best caption based on VGG16 consensus')
image_dir = 'UAV/images/'
VGG16_idxList = list()
VGG16_bestCaptions = list()

transfer_values_train = np.load(
Example 6
def parse_links(names, src, menu_links, input_path, output_path,
                layer_level, recursion_depth):
    """
    Parses links and organizes folders recursively
    - Finds suitable links in html file corresponding to input path
    - Changes the links by giving them unique names
    - Creates a new directory with that unique name in the output_path
    - Generates the modified html file in the output_path
    """
    print_progress(layer_level)

    with open(input_path) as f:
        soup = BeautifulSoup(f.read())
        html_path = os.path.join(output_path, "index.html")
    parse_css_links(soup, layer_level)
    parse_images(soup, layer_level)
    save_html(soup, html_path)

    bread_tag = soup.find("div", attrs={"class": "breadcum"})
    bread_count = 0
    if bread_tag:
        if bread_tag.find_all('a'):
            for link_tag in reversed(bread_tag.find_all('a')):
                link_tag['href'] = bread_count*"../"
                if layer_level == 3:
                    link_tag['href'] = bread_count*"../" + "../"
                bread_count += 1
                
    for link_tag in soup.find_all('a'):
        parse_menu_links(layer_level, link_tag, menu_links)
        link = link_tag.get('href')
        if link is None:
            continue
        # Keep a copy of the original href for the javascript redirect below
        elink = link_tag.get('href')
        if link.startswith("http://"):    
            if (link  in downloadablelinks):
                link = link[6:]
                link_tag['href'] = layer_level*"../" + "../external" + link
                if layer_level is 3:
                    link_tag['href'] = 4*"../" + "../external" + link
            elif link in activeallexternallinks:
                link_tag['href'] = 'javascript:var c=confirm("Do You wish to access internet?");if(c==true){window.location="'+elink+'";}'
            else:
                link_tag['href'] = 'javascript:alert("Link is dead");'
        if link_is_appropriate(link, layer_level):
            name = link_tag.string.strip(" \t\n\r").partition(' ')[0].lower()
            name = get_unique_name(name, names)
            if layer_level == 2:
                name = name + "/theory"
            link_tag['href'] = name
            new_output_path = os.path.join(output_path, name)
            make_new_directory(new_output_path)

            if recursion_depth > 0:
                parse_links(names, src, menu_links,
                            input_path=os.path.join(src, link),
                            output_path=new_output_path,
                            layer_level=layer_level + 1,
                            recursion_depth=recursion_depth - 1)
        if (layer_level is 1 and link.count('&') is 3):
            name2 = link_tag.string.strip(" \t\n\r").partition(' ')[0].lower()
            link_tag['href'] = name + "/" + name2 + "/theory"         
        parse_last_layer(link_tag, names, src, menu_links, input_path,
                         output_path, layer_level, recursion_depth)
    save_html(soup, html_path)
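A hedged sketch of how this recursion might be started from a site's root page; the paths, container types, and starting values below are assumptions for illustration, not taken from the original project:

# Hypothetical top-level call: mirror the root page into 'output/',
# following appropriate links at most 3 levels deep.
names = {}        # consumed by get_unique_name (exact type assumed here)
menu_links = []   # filled in by parse_menu_links (exact type assumed here)
parse_links(names, 'site_src/', menu_links,
            input_path='site_src/index.html',
            output_path='output/',
            layer_level=0,
            recursion_depth=3)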
Example 7
def parse_links(names, src, menu_links, input_path, output_path, layer_level,
                recursion_depth):
    """
    Parses links and organizes folders recursively
    - Finds suitable links in html file corresponding to input path
    - Changes the links by giving them unique names
    - Creates a new directory with that unique name in the output_path
    - Generates the modified html file in the output_path
    """
    print_progress(layer_level)

    with open(input_path) as f:
        soup = BeautifulSoup(f.read())
        html_path = os.path.join(output_path, "index.html")
    parse_css_links(soup, layer_level)
    parse_images(soup, layer_level)
    save_html(soup, html_path)

    bread_tag = soup.find("div", attrs={"class": "breadcum"})
    bread_count = 0
    if bread_tag:
        if bread_tag.find_all('a'):
            for link_tag in reversed(bread_tag.find_all('a')):
                link_tag['href'] = bread_count * "../"
                if layer_level == 3:
                    link_tag['href'] = bread_count * "../" + "../"
                bread_count += 1

    for link_tag in soup.find_all('a'):
        parse_menu_links(layer_level, link_tag, menu_links)
        link = link_tag.get('href')
        if link is None:
            continue
        elink = link_tag.get('href')
        if link.startswith("http://"):
            if link in downloadablelinks:
                # Strip the "http:/" prefix, keeping the leading "/"
                link = link[6:]
                link_tag['href'] = layer_level * "../" + "../external" + link
                if layer_level == 3:
                    link_tag['href'] = 4 * "../" + "../external" + link
            elif link in activeallexternallinks:
                link_tag['href'] = (
                    'javascript:var c=confirm("Do You wish to access internet?");'
                    'if(c==true){window.location="' + elink + '";}')
            else:
                link_tag['href'] = 'javascript:alert("Link is dead");'
        if link_is_appropriate(link, layer_level):
            name = link_tag.string.strip(" \t\n\r").partition(' ')[0].lower()
            name = get_unique_name(name, names)
            if layer_level == 2:
                name = name + "/theory"
            link_tag['href'] = name
            new_output_path = os.path.join(output_path, name)
            make_new_directory(new_output_path)

            if recursion_depth > 0:
                parse_links(names,
                            src,
                            menu_links,
                            input_path=os.path.join(src, link),
                            output_path=new_output_path,
                            layer_level=layer_level + 1,
                            recursion_depth=recursion_depth - 1)
        if (layer_level is 1 and link.count('&') is 3):
            name2 = link_tag.string.strip(" \t\n\r").partition(' ')[0].lower()
            link_tag['href'] = name + "/" + name2 + "/theory"
        parse_last_layer(link_tag, names, src, menu_links, input_path,
                         output_path, layer_level, recursion_depth)
    save_html(soup, html_path)