else:
    # load pre-computed distances
    with open(args.distance_file, 'rb') as f_in:
        distances = pickle.load(f_in)

print('extracted features')

with open(args.output_file, 'w', buffering=1) as f_out:

    f_out.write("scoring,weights,{0}\n".format(','.join(correlation_metrics)))

    for distance_function in sorted(distance_functions.keys()):

        if args.image_folder is not None:
            # precompute distances and targets based on the ANN features
            precomputed_distances = precompute_distances(inception_features, distance_function)
            distances[distance_function] = precomputed_distances
        else:
            # simply grab them from the loaded dictionary
            precomputed_distances = distances[distance_function]

        # raw correlation
        correlation_results = compute_correlations(precomputed_distances, target_dissimilarities,
                                                   distance_function)
        f_out.write("{0},fixed,{1}\n".format(distance_function,
                    ','.join(map(lambda x: str(correlation_results[x]), correlation_metrics))))

        # correlation with optimized weights
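# ---------------------------------------------------------------------------
# NOTE: precompute_distances() is defined elsewhere in the code base. The
# helper below is only a minimal sketch of what it plausibly does, assuming
# that it maps a collection of feature vectors to a square matrix of pairwise
# distances and that distance_function is a scipy-compatible metric name
# (e.g. 'euclidean' or 'cosine'). The actual implementation may differ.
import numpy as np
from scipy.spatial.distance import pdist, squareform

def precompute_distances_sketch(feature_vectors, distance_function):
    # stack all feature vectors into an (n_items, n_features) matrix
    matrix = np.asarray(feature_vectors).reshape(len(feature_vectors), -1)
    # pdist gives the condensed pairwise distances; squareform expands them
    return squareform(pdist(matrix, metric=distance_function))
# ---------------------------------------------------------------------------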
                if scale_type in feature_data[feature_name]['aggregated']:
                    item_vec.append(feature_data[feature_name]['aggregated'][scale_type][item])
                else:
                    # features extracted from categories only have one constant scale type
                    item_vec.append(feature_data[feature_name]['aggregated']['metadata'][item])

            item_vec = np.array(item_vec)
            vectors.append(item_vec.reshape(1, -1))

        # compute correlations
        for distance_function in sorted(distance_functions.keys()):

            if args.feature_folder is not None:
                # precompute distances and targets based on the feature values
                precomputed_distances = precompute_distances(vectors, distance_function)
                if space_name not in distances:
                    distances[space_name] = {}
                if scale_type not in distances[space_name]:
                    distances[space_name][scale_type] = {}
                distances[space_name][scale_type][distance_function] = precomputed_distances
            else:
                # simply grab them from the loaded dictionary
                precomputed_distances = distances[space_name][scale_type][distance_function]

            # raw correlation
            correlation_results = compute_correlations(precomputed_distances,
                                                       target_dissimilarities, distance_function)
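# ---------------------------------------------------------------------------
# NOTE: compute_correlations() lives elsewhere; the sketch below only
# illustrates the assumed contract, namely that it returns a dict keyed by
# metric name so that correlation_results[x] can be looked up for each entry
# of correlation_metrics. It ignores the optional weight-optimization
# arguments used at other call sites, and distance_function is kept only for
# signature parity.
import numpy as np
from scipy.stats import pearsonr, spearmanr, kendalltau

def compute_correlations_sketch(precomputed_distances, target_dissimilarities,
                                distance_function):
    dist = np.asarray(precomputed_distances)
    target = np.asarray(target_dissimilarities)
    # compare only the upper triangle so each item pair is counted once
    idx = np.triu_indices_from(dist, k=1)
    return {'pearson': pearsonr(dist[idx], target[idx])[0],
            'spearman': spearmanr(dist[idx], target[idx])[0],
            'kendall': kendalltau(dist[idx], target[idx])[0]}
# ---------------------------------------------------------------------------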
    recall_string = ' '.join(recall_list)
    print(recall_string)
    call(recall_string, shell=True)

else:
    # do the evaluation
    evaluation_metrics = []
    evaluation_results = []

    if do_c or do_r:
        # compute overall kendall correlation of bottleneck activation to dissimilarity ratings
        model_outputs = model.predict(original_images)
        bottleneck_activation = model_outputs[1] if do_m else model_outputs[0]

        for distance_function in sorted(distance_functions.keys()):
            precomputed_distances = precompute_distances(bottleneck_activation, distance_function)
            kendall_fixed = compute_correlations(precomputed_distances, target_dissimilarities,
                                                 distance_function)['kendall']
            kendall_optimized = compute_correlations(precomputed_distances, target_dissimilarities,
                                                     distance_function, 5, args.seed)['kendall']
            evaluation_metrics += ['kendall_{0}_fixed'.format(distance_function),
                                   'kendall_{0}_optimized'.format(distance_function)]
            evaluation_results += [kendall_fixed, kendall_optimized]

    # compute standard evaluation metrics on the test set
    eval_test = model.evaluate_generator(test_seq, steps=test_steps)
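# ---------------------------------------------------------------------------
# NOTE: hypothetical continuation sketch, not part of the original file.
# Keras returns the test-set scores from evaluate_generator() in the same
# order as model.metrics_names, so the kendall correlations collected above
# and the standard metrics can be merged into one flat record (e.g. for a
# single CSV line). All names below are placeholders.
def merge_evaluation_sketch(model, eval_test, evaluation_metrics, evaluation_results):
    # evaluate_generator() returns a scalar for single-metric models
    scores = list(eval_test) if isinstance(eval_test, (list, tuple)) else [eval_test]
    return (evaluation_metrics + list(model.metrics_names),
            evaluation_results + scores)
# ---------------------------------------------------------------------------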
        # transform images for distance computation
        transformed_images = []
        for img in images:
            transformed_img, image_size = downscale_image(img, aggregator_function, block_size,
                                                          args.greyscale, (1, -1))
            transformed_images.append(transformed_img)
    else:
        image_size = current_image_size

    for distance_function in sorted(distance_functions.keys()):

        distance_file_name = '{0}-{1}-{2}.pickle'.format(block_size, aggregator_name, distance_function)
        distance_file_path = os.path.join(args.distance_folder, distance_file_name)

        if args.image_folder is not None:
            # precompute distances based on the transformed images and store them
            precomputed_distances = precompute_distances(transformed_images, distance_function)
            with open(distance_file_path, 'wb') as f_out_distance:
                pickle.dump(precomputed_distances, f_out_distance, protocol=pickle.HIGHEST_PROTOCOL)
        else:
            # simply load them from the respective pickle file (skip if not present)
            if os.path.exists(distance_file_path):
                with open(distance_file_path, 'rb') as f_in:
                    precomputed_distances = pickle.load(f_in)
            else:
                continue

        # raw correlations
        correlation_results = compute_correlations(precomputed_distances, target_dissimilarities,
                                                   distance_function)
        f_out.write("{0},{1},{2},{3},fixed,{4}\n".format(aggregator_name, block_size, image_size,
                    distance_function,
                    ','.join(map(lambda x: str(correlation_results[x]), correlation_metrics))))

        # correlation with optimized weights
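# ---------------------------------------------------------------------------
# NOTE: downscale_image() is defined elsewhere; this is only a sketch of a
# plausible implementation inferred from the call above: aggregate
# block_size x block_size pixel blocks with aggregator_function (e.g. np.mean
# or np.max), optionally convert to greyscale first, and reshape the result
# to target_shape. skimage.measure.block_reduce performs exactly this kind of
# block-wise aggregation. The returned image_size (here: the reduced width)
# is a guess.
import numpy as np
from skimage.measure import block_reduce

def downscale_image_sketch(img, aggregator_function, block_size, greyscale, target_shape):
    img = np.asarray(img, dtype=float)
    if greyscale and img.ndim == 3:
        img = img.mean(axis=-1)  # naive channel average; real code may differ
    # keep the channel axis intact for color images
    blocks = (block_size, block_size) if img.ndim == 2 else (block_size, block_size, 1)
    reduced = block_reduce(img, block_size=blocks, func=aggregator_function)
    return reduced.reshape(target_shape), reduced.shape[1]
# ---------------------------------------------------------------------------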