def restart_level(self):
    """Restart the current level."""
    if not self.game.debug:
        # Restore the save made at the start of the level and carry over the player's inventory.
        save_game = utility.load_object('level_start')
        utility.transfer_inventory(save_game.player, self.game.player)
    self.game.setup(self.game.level.number)
    self.game.ui = MainUI(self.game)
def calcul_metric_concours(model, val_loader, use_gpu=True, show_acc_per_class=False):
    """Compute validation accuracy, mean loss and top-3 MAP score over val_loader."""
    model.eval()
    true = []
    pred = []
    val_loss = []
    pred_top3 = []
    criterion = nn.CrossEntropyLoss()
    with torch.no_grad():
        for batch in val_loader:
            inputs, targets = batch
            if use_gpu:
                inputs = inputs.cuda()
                targets = targets.cuda()
            output = model(inputs)
            predictions = output.max(dim=1)[1]
            predictions_top_3 = output.topk(3)[1]
            val_loss.append(criterion(output, targets).item())
            true.extend(targets.data.cpu().numpy().tolist())
            pred.extend(predictions.data.cpu().numpy().tolist())
            pred_top3.extend(predictions_top_3.data.cpu().numpy().tolist())
    top3_score = mapk(true, pred_top3) * 100
    acc = accuracy_score(true, pred) * 100
    loss = sum(val_loss) / len(val_loss)
    if show_acc_per_class:
        # Per-class accuracy taken from the diagonal of the confusion matrix.
        conf_mat = confusion_matrix(true, pred)
        dec_dict = load_object("saves_obj/dec_dict.pk")
        acc_per_class = {}
        for number, name in dec_dict.items():
            acc_per_class[name] = conf_mat[number, number] / np.sum(conf_mat[number, :])
        print(conf_mat)
        print(acc_per_class)
    model.train(True)
    return acc, loss, top3_score
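# --- Usage sketch (illustrative, not part of the original training code) ---
# Shows how calcul_metric_concours would typically be called. The ResNet model,
# the class count (340) and the random TensorDataset standing in for the doodle
# data are assumptions; the project's own `mapk` helper must be importable for
# the call to succeed.
import torch
from torch.utils.data import DataLoader, TensorDataset
from torchvision.models import resnet18


def _demo_validation_run(num_classes=340):
    model = resnet18(num_classes=num_classes)
    # Random stand-in batch: 64 RGB images of 224x224 with random labels.
    fake_images = torch.randn(64, 3, 224, 224)
    fake_labels = torch.randint(0, num_classes, (64,))
    val_loader = DataLoader(TensorDataset(fake_images, fake_labels), batch_size=16)
    acc, loss, top3 = calcul_metric_concours(model, val_loader, use_gpu=False)
    print("acc={:.2f}%  loss={:.4f}  top-3 MAP={:.2f}".format(acc, loss, top3))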
def main():
    if args.debug:
        # Debug run: start a fresh game at the requested level.
        debug = args.debug
        level = args.level[0] if args.level else 1
        with Game(debug=debug, level=level) as game:
            game.mainloop()
    elif utility.save_exists('game_exit'):
        # Resume the game saved on the last exit.
        with utility.load_object('game_exit') as game:
            game.mainloop()
    else:
        with Game() as game:
            game.mainloop()
def generate_random_dataset(path, nb_row_valid, nb_rows_test, nb_rows, dict_nb_lignes,
                            size_image=224, encoding_dict=None, filenames=None,
                            use_acc_proportionate_sampling=False):
    """For each class in filenames, draw nb_rows random rows from that class's file.

    :param path: directory containing one CSV file per class
    :param nb_row_valid: number of rows reserved for the validation set
    :param nb_rows_test: number of rows reserved for the test set
    :param nb_rows: base number of training rows to draw per class
    :param dict_nb_lignes: mapping from filename to its total number of rows
    :param size_image: size of the rendered doodle images
    :param encoding_dict: mapping from class name to label index
    :param filenames: class files to use (defaults to every file in path)
    :param use_acc_proportionate_sampling: draw more rows for classes with low validation accuracy
    :return: ConcatDataset of one DoodlesDataset per class
    """
    if filenames is None:
        filenames = os.listdir(path)
    if use_acc_proportionate_sampling:
        if os.path.isfile("saves_obj/dict_acc_per_class_valid.pk"):
            dict_acc_class = load_object("saves_obj/dict_acc_per_class_valid.pk")
        else:
            print("No per-class accuracy dictionary found; using uniform sampling")
            use_acc_proportionate_sampling = False
    # The first nb_row_valid + nb_rows_test rows of each file are reserved for validation/test.
    nb_lignes_skip = nb_row_valid + nb_rows_test
    list_dataset = []
    dict_nb_row_used_per_class = {}
    for fn in filenames:
        n = dict_nb_lignes[fn]
        # Skip the reserved rows, then skip a random subset of the remainder so that
        # only the requested number of training rows is read.
        skip = list(range(1, nb_lignes_skip)) + sorted(
            random.sample(range(nb_lignes_skip, n), n - nb_rows - nb_lignes_skip))
        if use_acc_proportionate_sampling:
            # Low-accuracy classes get up to 1.1 * nb_rows; near-perfect classes as little as 0.1 * nb_rows.
            acc = dict_acc_class[fn[:-4]]
            new_rows = int((1.1 - acc) * nb_rows)
        else:
            new_rows = nb_rows
        dict_nb_row_used_per_class[fn] = new_rows
        data_set = DoodlesDataset(fn, path, nrows=new_rows, size=size_image,
                                  skiprows=skip, encoding_dict=encoding_dict, mode="train")
        list_dataset.append(data_set)
    doodles = ConcatDataset(list_dataset)
    print("Training rows sampled per class (total: {}):".format(
        sum(dict_nb_row_used_per_class.values())), dict_nb_row_used_per_class)
    return doodles
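# --- Usage sketch (illustrative, not from the original repo) ---
# Shows how the ConcatDataset returned by generate_random_dataset would usually be
# wrapped in a shuffled DataLoader. `path_csv`, `dict_nb_lignes` and `enc_dict` are
# placeholders for objects the project builds elsewhere, and the row counts are
# arbitrary example values.
from torch.utils.data import DataLoader


def _build_train_loader(path_csv, dict_nb_lignes, enc_dict, batch_size=64):
    train_set = generate_random_dataset(path_csv, nb_row_valid=500, nb_rows_test=500,
                                        nb_rows=2000, dict_nb_lignes=dict_nb_lignes,
                                        size_image=224, encoding_dict=enc_dict)
    return DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=4)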
from utility import load_object
import numpy as np

dict_acc_general = load_object("dict_acc_per_class_valid_model_general.pk")
dict_acc_cible = load_object("dict_acc_per_class_valid_model_mauvaises_classes_sampling.pk")


def print_sorted_dict(dictionnaire):
    """Print per-class accuracies in ascending order and the mean of the 50 worst classes."""
    low_bad_list = []
    for i, (key, value) in enumerate(sorted(dictionnaire.items(), key=lambda item: item[1])):
        print("%s: %s" % (key, value))
        if i < 50:
            low_bad_list.append(value)
    print("Mean accuracy of the 50 worst classes:", np.mean(low_bad_list))


# print_sorted_dict(dict_acc_general)
print_sorted_dict(dict_acc_cible)
        path_save_model, use_gpu, get_prob_pred=True)
    pred = list_pred[file_number - 1]
    prob = list_tensor_prob[file_number - 1].data.numpy()
    pred_string = [decoding_dict[element] for element in pred]
    return pred_string, prob


if __name__ == "__main__":
    start_time = time.time()
    path_save_model = "saves_model/model_info.tar"
    use_gpu = False
    file_number = 1
    path = 'D:/User/William/Documents/Devoir/Projet Deep/data/photo_inference/'
    decoding_dict = load_object("saves_obj/dec_dict.pk")
    prediction_classe, prob = predict_image_classes(path, path_save_model, use_gpu,
                                                    decoding_dict, file_number)
    print(prediction_classe)
    print(prob)
    print("Computation time: {} seconds".format(time.time() - start_time))  # Faster without GPU