# Evaluate a pre-trained encoder checkpoint on sentence-length buckets, then
# plot accuracy-vs-length curves for all encoders.
# NOTE(review): this chunk uses `encoder`, `args`, `P`, `data_set`, `device`,
# `MLP`, `Dataset`, `evaluate`, `sns`, `plt` defined elsewhere in the file; the
# checkpoint-loading section presumably runs inside an enclosing
# `for encoder in ...` loop — confirm against the full script.

args.classifier_dropout_rate = 0.0
# f-string replaces .format(encoder, encoder, encoder) — same path, stated once.
args.checkpoint_path = f"./../outputs/{encoder}/{encoder}_Adam_X/{encoder}_bestModel.pwf"

model = MLP(args)
# NOTE(review): torch.load unpickles arbitrary objects; if the checkpoint file
# could be untrusted, pass weights_only=True (torch >= 1.13).
model.load_state_dict(torch.load(args.checkpoint_path, map_location=device))
model.to(device)
model.eval()  # inference mode: disables dropout

# Accuracy per length bucket; explicit (min_len, max_len) pairs replace the
# original zip of two parallel literal lists.
for min_len, max_len in [(0, 10), (10, 15), (15, 20), (20, 100)]:
    dataset = Dataset(args)
    dataset.filter_based_on_length(data_set, min_len, max_len)
    accu = evaluate(dataset, model, args.batch_size, args.data_set)
    P[encoder].append(accu)
    print(encoder, min_len, max_len, accu)

# Plot one accuracy curve per encoder over the four length buckets.
sns.set()
plt.figure(1)
for encoder in ["BoW", "LSTM", "biLSTM", "biLSTM_maxp"]:
    plt.plot(P[encoder], label=encoder)
plt.legend()
plt.xlabel("mean number of tokens")
plt.ylabel("accuracy")
plt.xticks(np.arange(4), ("0-10", "10-15", "15-20", "20-max"))