def diffusion_trend(ind):
    """Simulate diffusion under the strategy encoded by *ind* and plot the results.

    Compiles the GP individual into a callable mitigation strategy, runs the
    epidemic simulation via ``evaluate.evaluate_individual`` using the
    module-level model/graph metrics and configuration constants, then renders
    both a trend and a prevalence plot for the resulting iterations.

    Args:
        ind: A DEAP GP individual; compiled with ``toolbox.compile``.

    Returns:
        tuple: ``(iterations, trends)`` — the raw per-iteration simulation
        records and the trend summary built from them.
    """
    # The second element (per-iteration mitigation log) is not needed for
    # plotting, so it is deliberately discarded.
    iterations, _ = evaluate.evaluate_individual(
        toolbox.compile(ind),
        m=model,
        traveler_set=travelers,
        mvc_set=minimal_vertex_cover,
        vert_avg_dist=vertex_average_distance,
        number_vertex_shortest=number_shortest_paths,
        Page_Rank=page_rank,
        Cluster_Coeff=cluster_coef,
        avg_degree=average_degree,
        short_dist=shortest_distances,
        avg_dist=average_distance,
        total_iterations=ITERATIONS,
        measure_every=MEASURE_EVERY,
        mitigations_per_measure=MITIGATIONS_PER_MEASURE,
        rollover=ROLLOVER,
        use_all=USE_ALL)

    trends = model.build_trends(iterations)

    # Visualization: infection trend over time, then prevalence.
    viz = DiffusionTrend(model, trends)
    viz.plot()
    viz = DiffusionPrevalence(model, trends)
    viz.plot()

    return iterations, trends
def evaulate_trained_individual(self):
    """testing evaluate.train_individual_fast

    Trains one randomly created individual, then re-evaluates the trained
    model (protobuf) on the testing set so the two accuracies can be
    compared side by side.

    NOTE(review): the method name has a typo ("evaulate"); it is kept
    unchanged because callers/test runners may reference it by name.
    """
    import sys  # hoisted from mid-function; used for sys.getsizeof below

    # create toolbox using deap_functions
    toolbox = deap_functions.create_toolbox()

    # loading data
    # input_file = '../data/sample.csv'
    input_file = '../data/train.csv'
    image_size = 28
    training_dataset, testing_dataset, validating_dataset, \
        training_labels, testing_labels, validating_labels = \
        data_load.data_load(input_file, image_size)

    # create one individual
    individual = toolbox.individual()

    # run training
    train_accuracy, protobuf = evaluate.train_individual_fast(
        individual, validating_dataset, validating_labels)

    # BUGFIX: the original overwrote the training accuracy with an
    # evaluation result before printing it, so the "accuracy from training"
    # line actually showed evaluation accuracy (and the identical
    # evaluate_individual call was then duplicated at the end). Distinct
    # variable names and a single evaluation call fix both issues.
    print("accuracy from training: ", train_accuracy)
    print("type of model :", type(protobuf))
    print(protobuf)
    self.assertTrue(hasattr(toolbox, 'individual'))
    print("size of model : ", sys.getsizeof(protobuf))
    print("deep size of the the model : ", deep_getsizeof(protobuf, set()))

    # use model in protobuf to evaluate individual on the testing set
    eval_accuracy = evaluate.evaluate_individual(
        individual, protobuf, testing_dataset, testing_labels)
    print("accuracy from evaluation: ", eval_accuracy)
page_rank = get_all_page_rank(model) cluster_coef = clustering_coefficient(model) # Evaluate the function # If we are doing the non mitigation # we must not do a secondary strategy if FUNCTION.__name__ != "mitigation_none": iterations, iterations_mitigations = evaluate.evaluate_individual( FUNCTION, m=model, traveler_set=travelers, mvc_set=minimal_vertex_cover, vert_avg_dist=vertex_average_distance, number_vertex_shortest=number_shortest_paths, Page_Rank=page_rank, Cluster_Coeff=cluster_coef, avg_degree=average_degree, short_dist=shortest_distances, avg_dist=average_distance, total_iterations=ITERATIONS, measure_every=MEASURE_EVERY, mitigations_per_measure=MITIGATIONS_PER_MEASURE, rollover=ROLLOVER, use_all=USE_ALL) else: iterations, iterations_mitigations = evaluate.evaluate_individual( FUNCTION, m=model, traveler_set=travelers, mvc_set=minimal_vertex_cover, vert_avg_dist=vertex_average_distance,