Example #1
	def run_calculate_weighting_command( self ):
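		# Note: PySide's QFileDialog.getSaveFileName returns ( fileName, selectedFilter ), so 'message' here is the chosen filter string.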
		target_file_path, message = PySide.QtGui.QFileDialog( ).getSaveFileName( None, 'Save Location' )
		data, message = methods.determine_weighting( self.transform, root_bone = self.root_bone, tolerance = self.tolerance )
		if not data:
			self.warning( message )
			self.file_path_field.setText( 'Data invalid' )
			return False
		
		methods.save_json( target_file_path, data )
		if os.path.exists( target_file_path ):
			self.file_path = target_file_path
			self.file_path_field.setText( target_file_path )
		
		return True
Example #2
def run_skin_calculation( transform, root_bone, tolerance = -1, file_path = '' ):
	"""
	Main method call for the skin converter to calculate skin weights from the transform's shape node.
	
	Returns [ data, message ]
	Data    : skin calculation data
	Message : message about the results. If data == False, message holds more info.
	"""
	data, message = methods.determine_weighting( transform, root_bone, tolerance = tolerance )
	
	# Data is valid, so if a file path was provided too, save the data out. 
	if file_path and data:
		methods.save_json( file_path, data )

	return data, message
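A minimal usage sketch of the call above; the mesh and bone names and the save path are hypothetical placeholders:

data, message = run_skin_calculation('body_geo', 'root_bone',
                                     tolerance=0.01,
                                     file_path='C:/temp/weights.json')
if not data:
    print(message)  # determine_weighting failed; the message explains why.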
Example #3
def annotate_data(source_file, annotations_file, tag=False, compounds=False):
    "Annotate existing COCO caption data: tokenize, and optionally POS-tag and extract compounds."
    data = load_json(source_file)
    for entry in data:
        raw_description = entry['caption']
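        # 'nlp' is assumed to be a module-level spaCy pipeline; tokenize the raw caption.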
        doc = nlp.tokenizer(raw_description)
        entry['tokenized'] = [tok.orth_ for tok in doc]
        if tag:
            # Call the tagger on the document.
            nlp.tagger(doc)
            entry['tagged'] = [(tok.orth_, tok.tag_) for tok in doc]
        if compounds:
            list_of_compounds = compounds_from_doc(doc)
            entry['compounds'] = list_of_compounds
    save_json(data, annotations_file)
    return data
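annotate_data relies on a module-level spaCy pipeline bound to nlp, plus the load_json/save_json helpers used above; a minimal setup sketch with hypothetical file paths:

import spacy

nlp = spacy.load('en')  # English model; supplies the tokenizer and tagger used above.

annotated = annotate_data('./Data/captions.json', './Data/annotated.json',
                          tag=True, compounds=False)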
Example #4
    def run_calculate_weighting_command(self):
        target_file_path, message = PySide.QtGui.QFileDialog().getSaveFileName(
            None, 'Save Location')
        data, message = methods.determine_weighting(self.transform,
                                                    root_bone=self.root_bone,
                                                    tolerance=self.tolerance)
        if not data:
            self.warning(message)
            self.file_path_field.setText('Data invalid')
            return False

        methods.save_json(target_file_path, data)
        if os.path.exists(target_file_path):
            self.file_path = target_file_path
            self.file_path_field.setText(target_file_path)

        return True
Example #5
def run_all(args):
    "Run all metrics on the data and save JSON files with the results."
    # Annotate generated data.
    annotated = annotate_data(args.source_file,
                              args.annotations_file,
                              tag=True,
                              compounds=True)

    # Load training data. (For computing novelty.)
    train_data = load_json('./Data/COCO/Processed/tokenized_train2014.json')
    train_descriptions = [
        entry['caption'] for entry in train_data['annotations']
    ]

    # Load annotated data.
    sentences = sentences_from_file(args.annotations_file)

    # Analyze the data.
    stats = system_stats(sentences)

    # Get raw descriptions.
    gen_descriptions = [
        entry['caption'] for entry in load_json(args.source_file)
    ]
    extra_stats = sentence_stats(train_descriptions, gen_descriptions)
    stats.update(extra_stats)

    # Save statistics data.
    save_json(stats, args.stats_file)

    ################################
    # Global recall

    train_stats = load_json('./Data/COCO/Processed/train_stats.json')
    val_stats = load_json('./Data/COCO/Processed/val_stats.json')

    train = set(train_stats['types'])
    val = set(val_stats['types'])
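    # A word type is "learnable" if it occurs in both train and val;
    # generated types that also occur in val count as "recalled".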
    learnable = train & val

    gen = set(stats['types'])
    recalled = gen & val

    coverage = {
        "recalled": recalled,
        "score": len(recalled) / len(learnable),
        "not_in_val": gen - learnable
    }

    coverage['omissions'] = most_frequent_omissions(
        coverage['recalled'],
        val_stats,  # Use validation set as reference.
        n=None)
    val_count_list = get_count_list(val_stats)
    coverage['percentiles'] = percentiles(val_count_list, recalled)
    save_json(coverage, args.global_coverage_file)

    ####################################
    # Local recall

    val_index = index_from_file('./Data/COCO/Processed/tagged_val2014.json',
                                tagged=True,
                                lower=True)
    generated = {entry['image_id']: entry['tokenized'] for entry in annotated}
    local_recall_res = dict(scores=local_recall_scores(generated, val_index),
                            counts=local_recall_counts(generated, val_index))
    save_json(local_recall_res, args.local_coverage_file)

    ##################################
    # Nouns pps
    npdata = {
        'pp_data': pp_stats(annotated),
        'compound_data': compound_stats(annotated)
    }
    save_json(npdata, args.noun_pp_file)
Example #6
    for entry in coverage_results.values():
        entry['omissions'] = most_frequent_omissions(
            entry['recalled'],
            val_stats,  # Use validation set as reference.
            n=None)  # Rank everything

    # Add percentile scores.
    val_count_list = get_count_list(val_stats)
    for entry in coverage_results.values():
        recalled = entry['recalled']
        entry['percentiles'] = percentiles(val_count_list, recalled)

    plot_percentiles(coverage_results)

    # Save the data
    save_json(coverage_results, './Data/Output/global_recall.json')

    # Show a table with the results.
    table = tabulate(tabular_data=[
        (system, entry['score']) for system, entry in coverage_results.items()
    ],
                     headers=['System', 'Coverage'],
                     tablefmt='latex_booktabs',
                     floatfmt='.2f')

    print(table)
    with open('./Data/Output/global_recall_table.txt', 'w') as f:
        f.write(table)
        f.write('\n\n')
        f.write(
            f'The limit is:  {limit}. This means {size_limit} words in Val cannot be learned.'
        )
Example #7
    system2label = {
        'Dai-et-al-2017': 'Dai et al. 2017',
        'Liu-et-al-2017': 'Liu et al. 2017',
        'Mun-et-al-2017': 'Mun et al. 2017',
        'Shetty-et-al-2016': 'Shetty et al. 2016',
        'Shetty-et-al-2017': 'Shetty et al. 2017',
        'Tavakoli-et-al-2017': 'Tavakoli et al. 2017',
        'Vinyals-et-al-2017': 'Vinyals et al. 2017',
        'Wu-et-al-2016': 'Wu et al. 2016',
        'Zhou-et-al-2017': 'Zhou et al. 2017'
    }

    system2color = dict(zip(sorted(system2label), my_palette))

    val_index = index_from_file('./Data/COCO/Processed/tagged_val2014.json',
                                tagged=True,
                                lower=True)
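    # Index over the tagged validation captions, used by the local recall computations below.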

    systems = [
        'Dai-et-al-2017', 'Liu-et-al-2017', 'Mun-et-al-2017',
        'Shetty-et-al-2016', 'Shetty-et-al-2017', 'Tavakoli-et-al-2017',
        'Vinyals-et-al-2017', 'Wu-et-al-2016', 'Zhou-et-al-2017'
    ]

    all_results = dict()
    for system in systems:
        print('Processing:', system)
        generated = name_to_mapping(system)
        system_results = dict(scores=local_recall_scores(generated, val_index),
                              counts=local_recall_counts(generated, val_index))
        all_results[system] = system_results

    plot_scores(all_results)
    save_json(all_results, './Data/Output/local_recall.json')
Example #8
train_data = load_json('./Data/COCO/Processed/tokenized_train2014.json')
train_descriptions = [entry['caption'] for entry in train_data['annotations']]

for folder in [
        'Dai-et-al-2017', 'Liu-et-al-2017', 'Mun-et-al-2017',
        'Shetty-et-al-2016', 'Shetty-et-al-2017', 'Tavakoli-et-al-2017',
        'Vinyals-et-al-2017', 'Wu-et-al-2016', 'Zhou-et-al-2017'
]:
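    # Compute corpus statistics for this system's annotated validation captions.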
    print('Processing:', folder)

    # Define source and target.
    base = './Data/Systems/'
    source = base + folder + '/Val/annotated.json'
    target = base + folder + '/Val/stats.json'

    # Load data.
    sentences = sentences_from_file(source)

    # Process data.
    stats = system_stats(sentences)

    # Get raw descriptions.
    gen_descriptions = [entry['caption'] for entry in load_json(source)]
    extra_stats = sentence_stats(train_descriptions, gen_descriptions)

    stats.update(extra_stats)

    # Save data.
    save_json(stats, target)
Example #9
from methods import parallel_sentences_from_file, parallel_stats, load_json, save_json, sentence_stats

train = parallel_sentences_from_file('./Data/COCO/Processed/tokenized_train2014.json',
                                     tagged=False,  # Don't load tags.
                                     lower=True)    # Lowercase all descriptions.

val   = parallel_sentences_from_file('./Data/COCO/Processed/tagged_val2014.json',
                                     tagged=False,  # Don't load tags.
                                     lower=True)    # Lowercase all descriptions.

# Compute stats for train and val data.
train_stats = parallel_stats(train)
val_stats   = parallel_stats(val)

# Extra stats.
train_data = load_json('./Data/COCO/Processed/tokenized_train2014.json')
train_descriptions = [entry['caption'] for entry in train_data['annotations']]

val_data = load_json('./Data/COCO/Processed/tagged_val2014.json')
val_descriptions = [entry['caption'] for entry in val_data['annotations']]

extra_stats = sentence_stats(train_descriptions, val_descriptions)

val_stats.update(extra_stats)

# Save data to file.
save_json(train_stats, './Data/COCO/Processed/train_stats.json')
save_json(val_stats, './Data/COCO/Processed/val_stats.json')
Example #10
        'Vinyals-et-al-2017':
        '\citeauthor{vinyals2017show} (\citeyear{vinyals2017show})',
        'Wu-et-al-2016': '\citeauthor{wu2017image} (\citeyear{wu2017image})',
        'Zhou-et-al-2017':
        '\citeauthor{zhou2017watch} (\citeyear{zhou2017watch})'
    }

    data = [val_row] + system_rows
    headers = [
        '', '2', '3', '4', 'Ratio', 'Types-2', '1', '2', '3', '4', '5',
        'Ratio', 'Types-1'
    ]
    table = tabulate(data, headers, tablefmt='latex_booktabs')
    # Tweak the LaTeX output: normalize decimals and add grouped column headers.
    # table = table.replace('{lrrrrrrrrrr}','{lrrrcrrrrrc}')
    table = table.replace('&    0.3  &', '&    0.30  &')
    table = table.replace(
        '\\toprule',
        '\\toprule \n & \multicolumn{3}{c}{Compound length} & \multicolumn{2}{c}{Compound stats} & \multicolumn{5}{c}{Prepositional phrase depth} & \multicolumn{2}{c}{PP stats}\\\\\n \cmidrule(lr){2-4} \cmidrule(lr){5-6} \cmidrule(lr){7-11} \cmidrule(lr){12-13}\n'
    )
    for system, cite in systems.items():
        table = table.replace(system, cite)

    # Print and save.
    print(table)

    with open('./Data/Output/nouns_pps_table.txt', 'w') as f:
        f.write(table)

    save_json(all_data, './Data/Output/nouns_pps.json')