Example #1
def visualize_probability_convergence():
    """
    Visualize how the empirical frequency of each die face converges to its
    true probability of 1/6 as more experiments accumulate.
    """
    fair_probs = [1.0 / 6] * 6

    # Run 500 experiments, each rolling a fair six-sided die ten times.
    counts = np.random.multinomial(10, fair_probs, size=500)

    # Accumulate the counts across experiments (cumulative sum down the rows).
    cum_counts = counts.astype(np.float32).cumsum(axis=0)

    # Compute the running probability estimate for each face across the
    # experiments. Each estimate converges towards 1/6 (about 0.167).
    estimates = cum_counts / cum_counts.sum(axis=1, keepdims=True)

    # Plot each estimated probability
    d2l.set_figsize((6, 4.5))
    for i in range(6):
        d2l.plt.plot(estimates[:, i].asnumpy(), label=f"P(die={str(i+1)})")

    # Add a horizontal line at the true probability of any single face, 1/6.
    d2l.plt.axhline(y=0.167, color="black", linestyle="dashed")

    # Set the x and y label for the current axes
    d2l.plt.gca().set_xlabel("Groups of experiments")
    d2l.plt.gca().set_ylabel("Estimated probability")

    # Create the legend and save the figure as an image.
    d2l.plt.legend()
    d2l.plt.savefig("probability_convergence.png")
Example #2
def plot_generated_data_points(features: np.array, targets: np.array) -> None:
    """
    Plot our features and targets in a scatter plot.
    """
    d2l.set_figsize((3.5, 2.5))
    d2l.plt.scatter(features[:, 1].asnumpy(), targets.asnumpy(), 1)
    d2l.plt.savefig("generated_data")
Example #3
def show_data():
    print("feature size {}, label size {}".format(features.size, labels.size))
    print("feature shape {}, label shape {}".format(features.shape,
                                                    labels.shape))
    print('features:', features[0], '\nlabel:', labels[0])
    d2l.set_figsize((3.5, 2.5))
    f = features[:, 0].asnumpy()
    l = labels.asnumpy()
    d2l.plt.scatter(f, l, 1)
    print("Saving as ../images/train_data.svg")
    d2l.plt.savefig('../images/train_data.svg')
def semilogy(x_vals,
             y_vals,
             x_label,
             y_label,
             x2_vals=None,
             y2_vals=None,
             legend=None,
             figsize=(8, 4)):
    d2l.set_figsize(figsize)
    d2l.plt.xlabel(x_label)
    d2l.plt.ylabel(y_label)
    d2l.plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')
        d2l.plt.legend(legend)
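
# A minimal usage sketch (assumed): compare two loss curves on a logarithmic y-axis.
epochs = list(range(1, 11))
train_loss = [1.0 / e for e in epochs]
test_loss = [1.5 / e for e in epochs]
semilogy(epochs, train_loss, 'epoch', 'loss',
         x2_vals=epochs, y2_vals=test_loss, legend=['train', 'test'])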
Example #5
def plot(
    X,
    Y=None,
    x_label=None,
    y_label=None,
    x_limit=None,
    y_limit=None,
    x_scale="linear",
    y_scale="linear",
    fig_size=(3.5, 2.5),
    fmts=["-", "m--", "g-", "r:"],
    legend=[],
    axes=None,
):
    d2l.set_figsize(fig_size)
    axes = axes if axes else d2l.plt.gca()

    def has_one_axis(X):
        return (hasattr(X, "ndim")
                and X.ndim == 1) or (isinstance(X, list)
                                     and not hasattr(X[0], "__len__"))

    if has_one_axis(X):
        X = [X]

    if Y is None:
        X, Y = [[]] * len(X), X
    elif has_one_axis(Y):
        Y = [Y]

    if len(X) != len(Y):
        X = X * len(Y)

    axes.cla()

    for x, y, fmt in zip(X, Y, fmts):
        if len(x):
            axes.plot(x, y, fmt)
        else:
            axes.plot(y, fmt)

    configure_axes(axes, x_label, y_label, x_limit, y_limit, x_scale, y_scale,
                   legend)
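
# `configure_axes` is not defined in this snippet; a minimal sketch of what it is
# assumed to do, mirroring d2l's set_axes helper:
def configure_axes(axes, x_label, y_label, x_limit, y_limit, x_scale, y_scale,
                   legend):
    # Label the axes, apply scales and limits, then draw the legend and grid.
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
    axes.set_xscale(x_scale)
    axes.set_yscale(y_scale)
    if x_limit:
        axes.set_xlim(x_limit)
    if y_limit:
        axes.set_ylim(y_limit)
    if legend:
        axes.legend(legend)
    axes.grid()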
Example #6
        mae_sum += bbox_eval(bbox_preds, bbox_labels, bbox_masks)
        m += bbox_labels.size

    if (epoch + 1) % 5 == 0:
        print('epoch %2d, class err %.2e, bbox mae %.2e, time %.1f sec' %
              (epoch + 1, 1 - acc_sum / n, mae_sum / m, time.time() - start))

# %% [markdown]
# ## Prediction


# %%
def predict(X):
    anchors, cls_preds, bbox_preds = net(X.as_in_context(ctx))
    cls_probs = cls_preds.softmax().transpose((0, 2, 1))
    output = contrib.nd.MultiBoxDetection(cls_probs, bbox_preds, anchors)
    idx = [i for i, row in enumerate(output[0]) if row[0].asscalar() != -1]
    return output[0, idx]


img = image.imread('pikachu.jpg')
feature = image.imresize(img, 256, 256).astype('float32')
X = feature.transpose((2, 0, 1)).expand_dims(axis=0)
output = predict(X)

# %% [markdown]
# Visualize the results.

# %%
d2l.set_figsize((5, 5))
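
# The cell that actually draws the detected boxes is missing here; a minimal
# sketch, assuming `from mxnet import nd` and that d2l.show_bboxes is available,
# with `output` rows holding (class_id, score, x_min, y_min, x_max, y_max) in
# normalized coordinates.
def display(img, output, threshold):
    fig = d2l.plt.imshow(img.asnumpy())
    for row in output:
        score = row[1].asscalar()
        if score < threshold:
            continue
        h, w = img.shape[0:2]
        bbox = [row[2:6] * nd.array((w, h, w, h), ctx=row.context)]
        d2l.show_bboxes(fig.axes, bbox, '%.2f' % score, 'w')

display(img, output, threshold=0.3)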
def xyplot(x_vals, y_vals, name):
    d2l.set_figsize(figsize=(5, 2.5))
    d2l.plt.plot(x_vals.asnumpy(), y_vals.asnumpy())
    d2l.plt.xlabel('x')
    d2l.plt.ylabel(name + '(x)')
    d2l.plt.show()
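
# A minimal usage sketch (assumed): plot the ReLU activation with mxnet ndarrays.
from mxnet import nd

x_vals = nd.arange(-8.0, 8.0, 0.1)
xyplot(x_vals, x_vals.relu(), 'relu')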
Example #8
import sys

sys.path.insert(0, '..')

import d2l
from mpl_toolkits import mplot3d
import numpy as np


def f(x):
    return x * np.cos(np.pi * x)


d2l.set_figsize((4.5, 2.5))
x = np.arange(-1.0, 2.0, 0.1)
fig, = d2l.plt.plot(x, f(x))
fig.axes.annotate('local minimum',
                  xy=(-0.3, -0.25),
                  xytext=(-0.77, -1.0),
                  arrowprops=dict(arrowstyle='->'))
d2l.plt.xlabel('x')
#!pip install https://apache-mxnet.s3-accelerate.amazonaws.com/dist/python/numpy/latest/mxnet-1.5.0-py2.py3-none-manylinux1_x86_64.whl
!pip install mxnet_cu100
!pip install matplotlib

ctx = d2l.try_gpu()
ctx

# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
import d2l
from mxnet import autograd, gluon, image, init, nd
from mxnet.gluon import nn
import matplotlib
import numpy as np

d2l.set_figsize((3.5, 2.5))
content_img = image.imread("lol.jpg")
print(content_img.shape)
d2l.plt.imshow(content_img.asnumpy())

style_img = image.imread("scream.jpg")

d2l.plt.imshow(style_img.asnumpy())
print(style_img.shape)

def rgb_mean_calc(img):
  # Compute the per-channel mean and standard deviation of an (H, W, 3) image.
  mat = img.transpose((2, 0, 1))
  mat = mat.asnumpy()
  mean_rgb = []
  mean_std = []
  for i in range(3):
    mean_rgb.append(mat[i].mean())
    mean_std.append(mat[i].std())
  return mean_rgb, mean_std
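
# A minimal usage sketch (assumed): per-channel statistics of the content image.
content_rgb_mean, content_rgb_std = rgb_mean_calc(content_img)
print(content_rgb_mean, content_rgb_std)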
Example #10
text = preprocess_raw(raw_text)
print(text[0:1000])

# Tokenize the text into source and target word sequences
num_examples = 50000
source, target = [], []
for i, line in enumerate(text.split('\n')):
    if i > num_examples:
        break
    parts = line.split('\t')
    if len(parts) >= 2:
        source.append(parts[0].split(' '))
        target.append(parts[1].split(' '))

d2l.set_figsize()
d2l.plt.hist([[len(l) for l in source], [len(l) for l in target]],
             label=['source', 'target'])
d2l.plt.legend(loc='upper right');


# Build the vocabulary
def build_vocab(tokens):
    tokens = [token for line in tokens for token in line]
    return d2l.data.base.Vocab(tokens, min_freq=3, use_special_tokens=True)


src_vocab = build_vocab(source)
len(src_vocab)


# Load the dataset
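
# A minimal sketch of the next step (assumed): pad or truncate each tokenized
# line to a fixed length so sentence pairs can be batched as equal-length arrays.
def pad(line, max_len, padding_token):
    if len(line) > max_len:
        return line[:max_len]
    return line + [padding_token] * (max_len - len(line))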