Code Example #1
 def draw(self, path, label, conf):
     # Load the waveform and reshape it to (batch=1, channel=1, samples)
     sig, sr = load_audio(path)
     sig = torch.tensor(sig).view(1, 1, -1).float()
     # Mel spectrogram of the signal; [0] drops the batch dimension
     spec = self.mel(sig)[0]
     out_path = '{}_pred.png'.format(os.path.basename(path).split('.')[0])
     pred_txt = "%s (%.1f%%)" % (label, 100 * conf)
     plot_heatmap(spec.cpu().numpy(), out_path, pred=pred_txt)
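The `plot_heatmap` helper called here (and again in Code Example #6) is not shown. A minimal sketch of a compatible implementation, assuming a 2-D spectrogram array and an optional prediction caption; the matplotlib body is an illustrative assumption, not the project's code:

import matplotlib
matplotlib.use('Agg')  # render off-screen so the PNG can be written headlessly
import matplotlib.pyplot as plt

def plot_heatmap(array, out_path, pred=None):
    # `array` is a 2-D spectrogram (mel bins x frames)
    fig, ax = plt.subplots()
    ax.imshow(array, origin='lower', aspect='auto', cmap='magma')
    if pred is not None:
        ax.set_title(pred)
    fig.savefig(out_path)
    plt.close(fig)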
Code Example #2
def main():
    graph = utils.load_graph()
    edge_distribution = get_edge_distribution(graph)
    utils.plot_heatmap(
        edge_distribution,
        'Row-Normalized Inter-Species Edge Weight Distribution Post Network Enhancement',
        'images/edge_distribution.png')
Code Example #3
def run_classifier(c_value, kernel='linear'):
    # multiclass support is handled by one-vs-one scheme
    classifier = svm.SVC(C=c_value, kernel=kernel)
    classifier.fit(X_train, y_train)

    test_predicted = classifier.predict(X_test)
    train_predicted = classifier.predict(X_train)

    assert len(test_predicted) == len(y_test)

    test_confusion = get_confusion_matrix(test_predicted, y_test)
    train_confusion = get_confusion_matrix(train_predicted, y_train)
    plot_heatmap(test_confusion,
                 f'../confusions/test_c_{c_value}_kernel_{kernel}.png')
    plot_heatmap(train_confusion,
                 f'../confusions/train_c_{c_value}_kernel_{kernel}.png')

    test_precision = get_precision(test_confusion)
    train_precision = get_precision(train_confusion)

    # correct = sum(1 for x in filter(lambda x: x, list(map(lambda x: x[0] == x[1], results))))
    # print(f'There are {correct} out of {len(X_test)} samples. This gives us an accuracy of {correct/len(X_test)}')

    print(f'Finished iteration with c {c_value} and kernel {kernel}')

    return test_precision, train_precision
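The `get_confusion_matrix` and `get_precision` helpers are defined elsewhere in this project. A minimal sketch, under the assumption that `get_precision` here means overall accuracy (diagonal over total), which matches the `trace() / sum()` computation in Code Example #17:

import numpy as np

def get_confusion_matrix(predicted, actual):
    # Rows index the actual class, columns the predicted class
    labels = sorted(set(predicted) | set(actual))
    index = {label: i for i, label in enumerate(labels)}
    matrix = np.zeros((len(labels), len(labels)), dtype=int)
    for p, a in zip(predicted, actual):
        matrix[index[a], index[p]] += 1
    return matrix

def get_precision(confusion):
    # Fraction of samples on the diagonal, i.e. overall accuracy
    return confusion.trace() / confusion.sum()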
Code Example #4
def test_group(generator, eng, numImgs, params, test_num):
    generator.eval()

    logging.info('Test group starts. \n')
    Efficiency = torch.zeros(numImgs)

    if params.heatmap:
        lamda_list = list(
            range(params.hwstart, params.hwend + params.hwstride,
                  params.hwstride))
        theta_list = list(
            range(params.hastart, params.haend + params.hastride,
                  params.hastride))
        H = len(lamda_list)
        W = len(theta_list)
        heat_scores = np.zeros((H, W))
        with tqdm(total=H * W, ncols=70) as t:
            for i, lamda in enumerate(lamda_list[::-1]):
                for j, theta in enumerate(theta_list):
                    img, _ = generate_test_images(generator, numImgs, params,
                                                  lamda, theta)
                    wavelength = matlab.double([lamda] * numImgs)
                    desired_angle = matlab.double([theta] * numImgs)
                    abseffs = eng.Eval_Eff_1D_parallel(img, wavelength,
                                                       desired_angle)
                    Efficiency = torch.Tensor(
                        [abseffs]).data.cpu().numpy().reshape(-1)
                    heat_scores[i, j] = np.max(Efficiency)
                    t.update()
        fig_path = params.output_dir + '/figures/heatmap_batch{}.png'.format(
            params.solver_batch_size_start)
        utils.plot_heatmap(lamda_list, theta_list, heat_scores, fig_path)
        print("Plot heatmap successfully!")

    else:
        max_eff_index = []
        max_eff = []
        best_struc = []
        with tqdm(total=test_num, ncols=70) as t:
            for i in range(test_num):
                lamda = random.uniform(600, 1200)
                theta = random.uniform(40, 80)
                img, strucs = generate_test_images(generator, numImgs, params,
                                                   lamda, theta)
                wavelength = matlab.double([lamda] * numImgs)
                desired_angle = matlab.double([theta] * numImgs)
                abseffs = eng.Eval_Eff_1D_parallel(img, wavelength,
                                                   desired_angle)
                Efficiency = torch.Tensor([abseffs
                                           ]).data.cpu().numpy().reshape(-1)
                max_now = np.argmax(Efficiency)
                max_eff_index.append(max_now)
                max_eff.append(Efficiency[max_now])
                best_struc.append(strucs[max_now, :, :].reshape(-1))
                t.update()

        print('Lowest: {:.2f} Highest: {:.2f} Average: {:.2f} Var: {:.2f}'.format(
            min(max_eff), max(max_eff), np.mean(max_eff), np.var(max_eff)))
Code Example #5
def plot_heatmap():
    """
    Plot an example heatmap for each beacon and save it under figures/heatmaps.
    """
    log_dir = "log/heatmap"
    pattern = r'Ligne (\d{1,2}).csv$'
    utils.construct_heatmap_set(log_dir, pattern)
    df_heat = pd.read_csv('data/heat.csv', sep=';')
    heatmap_dir = os.path.join('figures', 'heatmaps')
    if not os.path.isdir(heatmap_dir):
        os.makedirs(heatmap_dir)
    for beacon in colums:  # `colums` is defined elsewhere in the source module
        utils.plot_heatmap(df_heat, beacon)
        plt.title(beacon)
        plt.savefig(
            os.path.join(heatmap_dir,
                         utils.find_beacon_name(beacon) + '.png'))
Code Example #6
    def _store_batch(self, data, batch_size, output, target):

        path = 'eval_batch'
        mkdir_p(path)
        sig, lengths, _ = data

        inds = output.argmax(1)
        confs = torch.exp(output)[torch.arange(batch_size), inds]

        spec, lengths = self.mel(sig.transpose(1, 2).float(), lengths)

        for i in range(batch_size):
            if inds[i] == target[i]:
                label = self.model.classes[inds[i]]
                pred_txt = "%s (%.1f%%)" % (label, 100 * confs[inds[i]])
                out_path = os.path.join(path, '%s.png' % i)
                plot_heatmap(spec[i][..., :lengths[i]].cpu().numpy(),
                             out_path,
                             pred=pred_txt)
Code Example #7
            observation_, reward, done = env.step(action)
            score += reward
            agent.store_transition(observation, action, reward, observation_,
                                   done)
            agent.learn()
            observation = observation_
        scores.append(score)
        eps_history.append(agent.epsilon)

        avg_score = np.mean(scores[-100:])

        print('episode ', i + 1, 'score %.2f' % score,
              'average score %.2f' % avg_score, 'epsilon %.2f' % agent.epsilon)

    NT = 10
    kappa = 1
    theta = 1
    sigma = 0.02
    smin = theta - 3 * sigma / np.sqrt(2 * kappa)  # min price
    smax = theta + 3 * sigma / np.sqrt(2 * kappa)  # max price
    invgrid = 21
    pricegrid = 21
    actiongrid = 11

    # Plot the learned policy heatmap for each time step
    for i in range(NT + 1):
        plot_heatmap(i, smin, smax, invgrid, pricegrid, actiongrid, agent)

    x = [i + 1 for i in range(n_games)]
    filename = 'opt_exec_ddqn_learningcurve.png'
    plot_learning_curve(x, scores, eps_history, filename)
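Neither `plot_heatmap(i, smin, smax, ...)` nor the agent's internals are shown here. A minimal sketch of what such a policy heatmap could look like, assuming the agent exposes a `choose_action(observation)` method and observations of the form (time, inventory, price); these names, the observation layout, and the grid ranges are all illustrative assumptions:

import numpy as np
import matplotlib.pyplot as plt

def plot_heatmap(t, smin, smax, invgrid, pricegrid, actiongrid, agent):
    # Query the agent's greedy action on an inventory x price grid
    # at time step t and save the result as a heatmap
    prices = np.linspace(smin, smax, pricegrid)
    inventories = np.linspace(0, 1, invgrid)
    actions = np.zeros((invgrid, pricegrid))
    for i, q in enumerate(inventories):
        for j, s in enumerate(prices):
            obs = np.array([t, q, s], dtype=np.float32)
            actions[i, j] = agent.choose_action(obs)
    plt.imshow(actions, origin='lower', aspect='auto', cmap='viridis')
    plt.xlabel('price')
    plt.ylabel('inventory')
    plt.title('greedy action at t={}'.format(t))
    plt.savefig('policy_heatmap_t{}.png'.format(t))
    plt.close()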
Code Example #8
File: gridworld.py Project: AubreyC/rl_experiments
    # Take some actions
    for i in range(30):
        done_flag, st, rw = gw.take_action(2)
        done_flag, st, rw = gw.take_action(3)

    # Grid map of the agent's path; each visited cell stores the step count
    steps = 0
    grid_path = copy.copy(gw.grid)
    for state_2d_visited in gw.get_state_2d_log():
        grid_path[state_2d_visited[0], state_2d_visited[1]] = steps
        steps += 1

    # Plots
    plt.figure()
    plt.subplot(1, 2, 1)
    utils.plot_heatmap(gw.grid, "Reward Map", False)
    plt.subplot(1, 2, 2)
    utils.plot_heatmap(grid_path, "Agent path", False)

    plt.figure()
    utils.plot_heatmap(gw.grid, "Reward Map", False)

    plt.show()
Code Example #9
File: scanpy_proc.py Project: yqshao17/single_cell
seurat_raw = seurat

seurat.var.index = [gene_info['gene_id_to_name'][gene]
                    for gene in seurat.var.index.values]
seurat.var_names_make_unique()

sc.tl.rank_genes_groups(seurat, 'clusters', method='wilcoxon')
sc.pl.rank_genes_groups(seurat, n_genes=25, sharey=False)

# plot heatmap of specific genes
specific_genes = pd.DataFrame(
    seurat.uns['rank_genes_groups']['names']).loc[0:20, :].T.values.flatten()
X = pd.DataFrame(seurat[:, specific_genes].layers['norm_data']).T
X.index = specific_genes
X.columns = seurat.obs['clusters']
from scale_plot_feature import plot_heatmap
plot_heatmap(X, y=seurat.obs['clusters'],  # row_labels=specific_genes,
             ncol=3, cmap='Reds', vmax=1, row_cluster=False,
             legend_font=6, cax_title='Peak Value',
             figsize=(8, 10), bbox_to_anchor=(0.4, 1.2),
             position=(0.8, 0.76, 0.1, 0.015),
             save='test_specific_feature.png')

marker_genes={
        'CD45':['PTPRC'],
        'CD8_T':['CD8A', 'CD8B'],
        'exhausted_CD8':['LAG3', 'CD244', 'EOMES', 'PTGER4'],
        'T_cell':['CD6', 'CD3D', 'CD3E', 'SH2D1A', 'TRAT1', 'CD3G'],
        'B_cell':['BLK', 'CD19', 'FCRL2', 'MS4A1', 'KIAA0125', 'TNFRSF17', 'TCL1A', 'SPIB', 'PNOC'],
        'NK':['XCL1', 'XCL2', 'NCR1'],
        'NK_CD56':['KIR2DL3', 'KIR3DL1', 'KIR3DL2', 'IL21R'],
        'DC':['CCL13', 'CD209', 'HSD11B1'],
        'macro':['CD68','CD84', 'CD163', 'MS4A4A'],     
        'mast':['TPSB2', 'TPSAB1', 'CPA3', 'MS4A2', 'HDC'],
        'neutrophil':['FPR1', 'SIGLEC5', 'CSF3R', 'FCAR', 'FCGR3B', 'CEACAM3', 'S100A12'],
        'Th1':['TBX21'],   
Code Example #10
def main():
    if not os.path.isfile(args.model_name) or args.continue_train:
        if args.continue_train:
            print("Loading tagger model from " + args.model_name + "...")
            tagger_model = torch.load(
                args.model_name, map_location=lambda storage, loc: storage)
            if args.gpu:
                tagger_model = tagger_model.cuda()
        else:
            print("Creating new model...")
            tagger_model = factorial_crf_tagger.DynamicCRF(
                args, word_freq, langs, len(char_to_ix),
                len(word_to_ix), unique_tags)
            if args.gpu:
                tagger_model = tagger_model.cuda()

        if args.unit_test:
            tests = unit.TestBP()
            labelSum = sum([tag.size() for tag in tagger_model.uniqueTags])
            # Create dummy LSTM features
            lstm_feats = utils.get_var(
                torch.Tensor(torch.randn(len(training_data[0][0]), labelSum)),
                args.gpu)
            tests.setUp(tagger_model, training_data[0][1],
                        len(training_data[0][0]), lstm_feats)

        loss_function = nn.NLLLoss()
        # Provide (N,C) log probability values as input
        # loss_function = nn.CrossEntropyLoss()

        if args.optim == "sgd":
            optimizer = optim.SGD(tagger_model.parameters(), lr=1.0)
        elif args.optim == "adam":
            optimizer = optim.Adam(tagger_model.parameters())
        elif args.optim == "adagrad":
            optimizer = optim.Adagrad(tagger_model.parameters())

        print("Training FCRF-LSTM model...")
        patience_counter = 0
        prev_avg_tok_accuracy = 0
        for epoch in range(args.epochs):
            accuracies = []
            sent = 0
            batch_idx = 0
            tokens = 0
            cum_loss = 0
            correct = 0
            random.shuffle(train_order)
            print("Starting epoch %d .." % epoch)

            start_time = time.time()
            for start_idx, end_idx in train_order:
                train_data = training_data[start_idx:end_idx + 1]
                train_sents = [elem[0] for elem in train_data]
                morph_sents = [elem[1] for elem in train_data]

                lang_ids = train_lang_ids[start_idx:end_idx + 1]

                sent += end_idx - start_idx + 1
                tokens += sum([len(sentence) for sentence in train_sents])
                batch_idx += 1

                if batch_idx % 5 == 0:
                    elapsed = time.time() - start_time
                    print("[Epoch %d] Sentence %d/%d, Tokens %d, "
                          "Cum_Loss: %f, Time: %f, Tokens/Sec: %d"
                          % (epoch, sent, len(training_data), tokens,
                             cum_loss / tokens, elapsed, tokens / elapsed))
                    # Average accuracy (correct / tokens) could be reported here too

                tagger_model.zero_grad()

                sents_in = []

                for i, sentence in enumerate(train_sents):
                    sent_in = []
                    lang_id = []
                    if args.model_type == "universal":
                        lang_id = [lang_ids[i]]

                    for word in sentence:
                        s_appended_word = lang_id + [c for c in word] + lang_id
                        word_in = utils.prepare_sequence(
                            s_appended_word, char_to_ix, args.gpu)
                        # targets = utils.prepare_sequence(s_appended_word[1:], char_to_ix, args.gpu)
                        sent_in.append(word_in)
                    sents_in.append(sent_in)

                # sents_in = torch.stack(sent_in)
                tagger_model.char_hidden = tagger_model.init_hidden()
                tagger_model.hidden = tagger_model.init_hidden()

                if args.sum_word_char:
                    all_word_seq = []
                    for sentence in train_sents:
                        word_seq = utils.prepare_sequence(
                            sentence, word_to_ix, args.gpu)
                        all_word_seq.append(word_seq)
                else:
                    all_word_seq = None

                if args.model_type == "specific" or args.model_type == "joint":
                    lstm_feat_sents, graph, maxVal = tagger_model(
                        sents_in,
                        morph_sents,
                        word_idxs=all_word_seq,
                        langs=lang_ids)
                else:
                    lstm_feat_sents, graph, maxVal = tagger_model(
                        sents_in, morph_sents, word_idxs=all_word_seq)

                # Skip parameter updates if marginals are not within a threshold
                if maxVal > 10.00:
                    print("Skipping parameter updates...")
                    continue

                # Compute the loss, gradients, and update the parameters
                all_factors_batch = []

                for k in range(len(train_sents)):
                    all_factors = tagger_model.get_scores(
                        graph, morph_sents[k], lstm_feat_sents[k], k)
                    all_factors_batch.append(all_factors)

                loss = tagger_model.compute_loss(all_factors_batch,
                                                 loss_function)
                # print("Loss:", loss)

                cum_loss += loss.item()  # .data[0] indexing is pre-0.4 PyTorch
                loss.backward()
                # tagger_model.gradient_check(all_factors_batch[0])
                optimizer.step()

            print("Loss: %f" % loss.cpu().data.numpy())
            print("Saving model..")
            torch.save(tagger_model, args.model_name)
            if (epoch + 1) % 4 == 0:
                print("Evaluating on dev set...")
                avg_tok_accuracy, f1_score = eval_on_dev(tagger_model,
                                                         curEpoch=epoch)

                # Early Stopping
                if avg_tok_accuracy <= prev_avg_tok_accuracy:
                    patience_counter += 1
                    if patience_counter == args.patience:
                        print(
                            "Model hasn't improved on dev set for %d epochs. Stopping Training."
                            % patience_counter)
                        break

                prev_avg_tok_accuracy = avg_tok_accuracy
    else:
        print("Loading tagger model from " + args.model_name + "...")
        tagger_model = torch.load(args.model_name,
                                  map_location=lambda storage, loc: storage)
        if args.gpu:
            tagger_model = tagger_model.cuda()
        else:
            tagger_model.gpu = False

        if args.visualize:
            print("[Visualization Mode]")
            utils.plot_heatmap(unique_tags, tagger_model.pairwise_weights,
                               "pair")
            #utils.plot_heatmap(unique_tags, tagger_model.transition_weights, "trans")
            #utils.plot_heatmap(unique_tags, tagger_model.lang_pairwise_weights, "pair", lang_idx=1)
            print("Stored plots in figures/ directory!")

        if args.test:
            avg_tok_accuracy, f1_score = eval_on_dev(tagger_model,
                                                     dev_or_test="test")
Code Example #11
 def plot_heatmap(X, y_true, y_pred):
     # As written this calls itself and recurses forever; in the original
     # project the inner call presumably resolves to a plot_heatmap
     # imported from elsewhere, which this method merely forwards to.
     plot_heatmap(X, y_true, y_pred)
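One way the original could avoid the self-call is to import the underlying helper under an alias; a minimal sketch, where the alias `_plot_heatmap` and its matplotlib body are illustrative assumptions rather than the project's code:

import matplotlib.pyplot as plt
import numpy as np

def _plot_heatmap(X, y_true, y_pred):
    # Illustrative body: render the data and note the labels in the title
    plt.imshow(np.asarray(X), aspect='auto', cmap='viridis')
    plt.colorbar()
    plt.title('true: {} / pred: {}'.format(y_true, y_pred))
    plt.show()

def plot_heatmap(X, y_true, y_pred):
    # Forward to the aliased helper instead of recursing
    _plot_heatmap(X, y_true, y_pred)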
Code Example #12
File: LP_irl.py Project: AubreyC/rl_experiments
            a_opt = policy_opt[s_1d]
            grid_pol[s_i, s_j] = a_opt

    # Create a grid showing the path of the worker;
    # each visited cell stores the step at which it was reached
    grid_path = copy.copy(gw.grid)
    steps = 0
    for state_visited in gw.get_state_2d_log():
        grid_path[state_visited[0], state_visited[1]] = steps
        steps += 1

    # Plot results
    plt.figure()
    plt.subplot(1, 3, 1)
    utils.plot_heatmap(gw.grid, "Original Reward", False)
    plt.subplot(1, 3, 2)
    utils.plot_heatmap(v_states, "Value Function", False)
    plt.subplot(1, 3, 3)
    utils.plot_heatmap(R_lp, "Recovered Reward", False)

    plt.figure()
    plt.subplot(1, 2, 1)
    utils.plot_heatmap(grid_path, "Agent Path", False)
    plt.subplot(1, 2, 2)
    utils.plot_policy(grid_pol, gw.actions_2d, "Policy Opt", False)

    plt.figure()
    utils.plot_heatmap(R_lp, "Reward recovered", False)

    plt.show()
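The `utils.plot_heatmap(grid, title, ...)` helper shared by the AubreyC/rl_experiments snippets (Code Examples #8, #12, #13, #18) is not included in them. A minimal sketch consistent with how it is called; the body and the meaning of the trailing boolean arguments are assumptions:

import matplotlib.pyplot as plt

def plot_heatmap(grid, title, block=True, colorbar=True):
    # Draw into the current axes so callers can compose several
    # heatmaps with plt.subplot() before a final plt.show()
    im = plt.imshow(grid, cmap='viridis', interpolation='nearest')
    plt.title(title)
    if colorbar:
        plt.colorbar(im)
    if block:
        plt.show()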
Code Example #13
File: max_ent.py Project: AubreyC/rl_experiments
                               dem_paths,
                               max_path_step=50)

    # Run the value iteration algorithm
    v_states_maxent = value_iteration.run_value_iteration(
        n_states, n_actions, p_trans, r_maxent, terminal_state_1d, gamma)
    v_states_maxent = np.reshape(v_states_maxent, gw.grid.shape, order='F')
    v_states_maxent = v_states_maxent.astype(float)

    # Transform in grid form
    r_maxent_grid = np.reshape(r_maxent, (size_grid, size_grid), order='C')
    r_maxent_grid = r_maxent_grid.astype(float)

    print("r_maxent_grid:{}".format(r_maxent_grid))

    # Plot results
    plt.figure()
    plt.subplot(1, 2, 1)
    utils.plot_heatmap(gw.grid, "Original Reward", False)
    plt.subplot(1, 2, 2)
    utils.plot_heatmap(v_states, "Value Function", False)

    plt.figure()
    plt.subplot(1, 2, 1)
    utils.plot_heatmap(r_maxent_grid, "Recovered Reward: MaxEnt", False)
    plt.subplot(1, 2, 2)
    utils.plot_heatmap(v_states_maxent, "Recovered Value Function: MaxEnt",
                       False)

    plt.show()
Code Example #14
# Training
sess = tf.Session()
sess.run(initializer)

for i in range(args.num_train_steps):
    # Generate one batch of samples. seq_len is sampled at random from 0-20;
    # the label has dimension seq_len, while the input contains an extra
    # interference element, so its dimension is seq_len*2+1.
    # E.g.: 10, (32, 21, 9), (32, 10, 8)
    seq_len, inputs, labels = data_generator.generate_batches(
        1,
        args.batch_size,
        bits_per_vector=args.num_bits_per_vector,
        max_seq_len=args.max_seq_len,
    )[0]
    # Compute the loss
    train_loss, _, outputs = sess.run(
        [model.loss, model.train_op, model.output],
        feed_dict={
            inputs_placeholder: inputs,
            outputs_placeholder: labels,
            max_seq_len_placeholder: seq_len
        })
    # Compute the accuracy
    avg_errors_per_seq = data_generator.error_per_seq(labels, outputs,
                                                      args.batch_size)
    print('Epoch: ({0}), Loss: {1}'.format(i, train_loss / seq_len),
          ', acc: {0}%'.format((1. - avg_errors_per_seq) * 100))
    # save heatmap
    if i % 50 == 0:
        plot_heatmap(i, inputs, outputs)
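The `plot_heatmap(i, inputs, outputs)` call saves a picture of the batch every 50 steps; its definition is not part of this snippet. A minimal sketch under the assumption that it renders the first sequence's input and output stacked vertically; the filename and layout are illustrative:

import matplotlib.pyplot as plt

def plot_heatmap(step, inputs, outputs):
    # Render the first sequence in the batch: model input on top,
    # model output below, one column per time step
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 4))
    ax1.imshow(inputs[0].T, aspect='auto', cmap='viridis')
    ax1.set_title('input')
    ax2.imshow(outputs[0].T, aspect='auto', cmap='viridis')
    ax2.set_title('output')
    fig.savefig('heatmap_{}.png'.format(step))
    plt.close(fig)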
Code Example #15
def draw_paths(paths):
    # Draw the 2-D loss surface, then overlay each descent trajectory
    plot_heatmap(loss_2d, -20, 20, -20, 20)

    for init, steps, name in paths:
        plot_gd2(path=(init, steps), label=name)
Code Example #16
File: gene_promoter.py Project: yqshao17/single_cell
def gene_promoter_heatmap(genedata,
                          prodata,
                          n_genes,
                          outdir='.',
                          group=True,
                          filter_cor=False):

    # plot cluster-specific genes and promoter accessibility
    # genedata is clustered by Seurat or other methods,
    # and saved and read as adata, with 'clusters' as obs key

    # TODO: should genedata or prodata be normalized?
    save_prefix = outdir + '/'
    genedata2, prodata2 = overlap_adata(genedata, prodata)

    sc.tl.rank_genes_groups(genedata2, 'clusters', method='wilcoxon')
    specific_genes = pd.DataFrame(
        genedata2.uns['rank_genes_groups']['names']).loc[
            0:n_genes, :].T.values.flatten()

    if filter_cor:
        save_prefix += 'filtered_'
        # filter specific genes that have high correlation
        # between gene expression and gene promoter accessibility
        specific_genes_filtered = []
        for gene in specific_genes:
            cor, pval = stats.pearsonr(genedata2.obs_vector(gene),
                                       prodata2.obs_vector(gene))
            if cor > 0 and pval < 0.05:
                specific_genes_filtered.append(gene)
        specific_genes = specific_genes_filtered

    X1 = pd.DataFrame(genedata2[:, specific_genes].layers['norm_data']).T
    #X1=pd.DataFrame(genedata2[:,specific_genes].X).T
    X1.index = specific_genes
    X1.columns = genedata2.obs['clusters']
    X2 = pd.DataFrame(prodata2[:, specific_genes].X).T
    X2.index = specific_genes
    X2.columns = genedata2.obs['clusters']

    if group:
        # group each cluster by mean expression as one cell
        X_group = X1.T
        X_group['clusters'] = X_group.index
        X_group = X_group.groupby('clusters').mean().T
        X1 = X_group
        X2_group = X2.T
        X2_group['clusters'] = X2_group.index
        X2_group = X2_group.groupby('clusters').mean().T
        X2 = X2_group
        save_prefix += 'grouped_'
    #print(X1.iloc[:5,:5])

    plot_heatmap(
        X1,
        y=X1.columns,  #row_labels=specific_genes, 
        ncol=3,
        cmap='Reds',
        vmax=1,
        row_cluster=False,
        legend_font=6,
        cax_title='Gene Value',
        figsize=(8, 10),
        bbox_to_anchor=(0.4, 1.2),
        position=(0.8, 0.76, 0.1, 0.015),
        save=save_prefix + 'genes_expression.png')

    plot_heatmap(
        X2,
        y=X2.columns,  #row_labels=specific_genes, 
        ncol=3,
        cmap='Reds',
        vmax=1,
        row_cluster=False,
        legend_font=6,
        cax_title='Promoter Value',
        figsize=(8, 10),
        bbox_to_anchor=(0.4, 1.2),
        position=(0.8, 0.76, 0.1, 0.015),
        save=save_prefix + 'promoter_accessibility.png')
Code Example #17
File: main.py Project: fsammart/ML2021
    w_knn = WeightedKNN(X, labels_X, classes)

    # results = knn.batch_predict(Y)
    w_results = w_knn.batch_predict(Y)

    # knn_confusion = confusion_matrix(results, labels_Y, classes)
    w_knn_confusion = confusion_matrix(w_results, labels_Y, classes)

    # knn_tp = knn_confusion.trace()
    w_knn_tp = w_knn_confusion.trace()

    # knn_precisions[i] = knn_tp/knn_confusion.sum()
    w_knn_precisions[i] = w_knn_tp / w_knn_confusion.sum()

    # plot_heatmap(knn_confusion, f'knn_k_{crossed_validation_k}_i{i}.png')
    plot_heatmap(w_knn_confusion, f'w_knn_k_{crossed_validation_k}_i{i}.png')

plot_precision(knn_precisions,
               w_knn_precisions,
               crossed_validation_k,
               filename=f'precision_k_{crossed_validation_k}.png')

print()
print(
    f'Finished all iterations. Trained {len(data)} registers, tested {chunk_size} registers.'
)
print(
    f'For knn, avg precision was {knn_precisions.mean()} with max value {knn_precisions.max()}'
)
print(
    f'For weighted knn, avg precision was {w_knn_precisions.mean()} with max value {w_knn_precisions.max()}'
)
Code Example #18
    # Create a grid showing the path of the worker;
    # each visited cell stores the step at which it was reached
    grid_path = copy.copy(gw.grid)
    steps = 0
    for state_visited in gw.get_state_2d_log():
        grid_path[state_visited[0], state_visited[1]] = steps
        steps += 1

    # Plot results
    plt.figure()
    # plt.subplot(1, 2, 1)
    utils.plot_heatmap(gw.grid, "Reward Map", False)

    plt.figure()
    # plt.subplot(1, 2, 2)
    utils.plot_heatmap(v_states, "Value Function", False, True)

    plt.figure()
    # plt.subplot(1, 2, 1)
    utils.plot_heatmap(grid_path, "Agent Path", False, False)

    plt.figure()
    # plt.subplot(1, 2, 2)
    utils.plot_policy(grid_pol, gw.actions_2d, "Policy Opt", False)