Example #1
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons, make_blobs  # samples_generator was removed in modern scikit-learn
from nn import build_model
from nn import plot_decision_boundary
from nn import predict
from nn import build_model_691

np.random.seed(0)
X, y = make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y,
            cmap=plt.cm.Spectral)  # show the original scatter
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(2, 2, i + 1)
    plt.title('Hidden Layer Size %d' % nn_hdim)
    model = build_model(X, y, nn_hdim)
    plot_decision_boundary(lambda x: predict(model, x), X, y)
plt.show()

np.random.seed(0)
X, y = make_blobs(n_samples=100, centers=3, n_features=2, random_state=0)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(2, 2, i + 1)
    plt.title('Hidden Layer Size %d' % nn_hdim)
    model = build_model_691(X, y, nn_hdim)
    plot_decision_boundary(lambda x: predict(model, x), X, y)
plt.show()
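
Both loops lean on the plot_decision_boundary helper imported from the local nn module, which is not shown here. A minimal sketch of what such a helper typically looks like, assuming pred_func maps an (n, 2) array of points to class labels (the grid step h is an arbitrary choice):

def plot_decision_boundary(pred_func, X, y):
    # Span a grid slightly wider than the data range.
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    h = 0.01  # grid resolution
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Classify every grid point, then colour the regions by predicted class.
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

With this shape contract, the lambda x: predict(model, x) passed in the loops above slots in directly as pred_func.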
Example #2
import logging
import os
import pickle

import torch
from torch import optim

# Project-local helpers assumed to be importable from this repo:
# Config, seed_everything, build_model, Tokenizer, LabelSmoothing,
# get_optimizer, make_train_data_from_txt

logging.basicConfig(level=logging.INFO)

if __name__ == '__main__':
    logging.info('*** Initializing ***')

    if not os.path.isdir(Config.data_dir):
        os.mkdir(Config.data_dir)

    seed_everything(Config.seed)
    device = torch.device(Config.device)

    start_epoch = 0

    logging.info('Define Models')
    model = build_model(Config).to(device)
    tokenizer = Tokenizer.from_pretrained(Config.model_name)

    logging.info('Define Loss and Optimizer')
    criterion = LabelSmoothing(tokenizer.vocab_size,
                               pad_id=tokenizer.pad_token_id,
                               smoothing=Config.smoothing)
    # lr=0 is intentional: the warmup wrapper sets the effective rate each step
    _opt = optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)
    optimizer = get_optimizer(_opt, factor=Config.factor, warmup=Config.warmup)

    logging.info('Preparing training data')
    if Config.use_pickle:
        with open(Config.pickle_path, 'rb') as f:
            train_data = pickle.load(f)
    else:
        train_data = make_train_data_from_txt(Config, tokenizer)
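
The Adam instance above is created with lr=0 and then handed to get_optimizer together with factor and warmup, which is the usual signature of a Noam-style schedule from "Attention Is All You Need": the wrapper sets the effective learning rate on every step. A minimal sketch of such a wrapper; the NoamOpt name and the Config.d_model source are assumptions, not confirmed by this snippet:

class NoamOpt:
    """Noam schedule: lr rises linearly for `warmup` steps, then decays
    as step ** -0.5 (assumed implementation, not from this repo)."""

    def __init__(self, optimizer, d_model, factor, warmup):
        self.optimizer = optimizer
        self.d_model = d_model
        self.factor = factor
        self.warmup = warmup
        self._step = 0

    def rate(self, step):
        return (self.factor * self.d_model ** -0.5
                * min(step ** -0.5, step * self.warmup ** -1.5))

    def step(self):
        # Update the learning rate before each optimizer step.
        self._step += 1
        lr = self.rate(self._step)
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        self.optimizer.step()

def get_optimizer(opt, factor, warmup):
    # d_model would come from the model config; Config.d_model is assumed here.
    return NoamOpt(opt, d_model=Config.d_model, factor=factor, warmup=warmup)

Under this schedule the rate is recomputed on every call to step(), so the lr=0 passed to Adam is never actually used.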