Example 1
from hub import dataset


def main():
    BATCH_SIZE = 64
    EPOCHS = 3

    # Load data
    ds = dataset.load("mnist/fashion-mnist")

    # Transform into a TensorFlow dataset
    # max_text_len is an optional argument that sets the maximum length of text labels
    ds = ds.to_tensorflow(max_text_len=15)

    # Convert the samples into the (inputs, targets) format expected by model.fit
    ds = ds.map(to_model_fit)

    # Split back into the original train and test sets
    train_dataset = ds.take(60000)
    test_dataset = ds.skip(60000)

    train_dataset = train_dataset.batch(BATCH_SIZE)
    test_dataset = test_dataset.batch(BATCH_SIZE)

    model = create_CNN()
    # model.summary()
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(train_dataset,
              epochs=EPOCHS,
              validation_data=test_dataset,
              validation_steps=1)
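
This snippet relies on two helpers, to_model_fit and create_CNN, that are defined elsewhere in the source file. Below is a hypothetical sketch of what they might look like, assuming 28x28 grayscale Fashion-MNIST images, 10 classes, the "data"/"labels" keys used throughout these examples, and a channels-last input (matching the tf.expand_dims(..., axis=3) call in Example 4):

import tensorflow as tf

# Hypothetical helper sketches; the originals live elsewhere in the project.
def to_model_fit(item):
    # Map a sample dict to the (inputs, targets) pair expected by model.fit
    data = tf.expand_dims(item["data"], axis=-1)  # add a channel dimension
    return data, item["labels"]

def create_CNN():
    # Small CNN for 28x28x1 inputs and 10 classes
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, 3, activation="relu", input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(10, activation="softmax"),
    ])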
Example 2
import tensorflow as tf
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from hub import dataset


def main():
    BATCH_SIZE = 64
    EPOCHS = 3

    optimizer = Adam()
    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    loss_fn = SparseCategoricalCrossentropy()

    # Load data
    ds = dataset.load("abhinavtuli/fashion-mnist")

    # Transform into a TensorFlow dataset
    ds = ds.to_tensorflow()

    # Split back into the original train and test sets
    train_dataset = ds.take(60000)
    test_dataset = ds.skip(60000)

    train_dataset = train_dataset.batch(BATCH_SIZE)
    test_dataset = test_dataset.batch(BATCH_SIZE)

    model = create_CNN()
    # model.summary()

    for epoch in range(EPOCHS):
        print("\nStarting Training Epoch {}".format(epoch))
        train(model, train_dataset, optimizer, loss_fn, train_acc_metric)
        print("Training Epoch {} finished\n".format(epoch))
        test(model, test_dataset, test_acc_metric)
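
Here train and test are custom loops defined elsewhere in the file. A plausible sketch using tf.GradientTape, under the same "data"/"labels" key assumptions as the other examples:

import tensorflow as tf

# Hypothetical sketches of the training and evaluation loops used above
def train(model, dataset, optimizer, loss_fn, acc_metric):
    for batch in dataset:
        with tf.GradientTape() as tape:
            preds = model(batch["data"], training=True)
            loss = loss_fn(batch["labels"], preds)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        acc_metric.update_state(batch["labels"], preds)
    print("Train accuracy: {:.4f}".format(acc_metric.result()))
    acc_metric.reset_states()

def test(model, dataset, acc_metric):
    for batch in dataset:
        preds = model(batch["data"], training=False)
        acc_metric.update_state(batch["labels"], preds)
    print("Test accuracy: {:.4f}".format(acc_metric.result()))
    acc_metric.reset_states()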
Example 3
import torch
import torch.optim as optim
from hub import dataset


def main():
    EPOCHS = 3
    BATCH_SIZE = 64
    LEARNING_RATE = 0.01
    MOMENTUM = 0.5
    torch.backends.cudnn.enabled = False
    random_seed = 2
    torch.manual_seed(random_seed)

    # Load data
    ds = dataset.load("mnist/fashion-mnist")

    # Transform into a PyTorch dataset
    # max_text_len is an optional argument that sets the maximum length of text labels; the default is 30
    ds = ds.to_pytorch(max_text_len=15)

    # Split back into the original train and test sets instead of using a random split
    train_dataset = torch.utils.data.Subset(ds, range(60000))
    test_dataset = torch.utils.data.Subset(ds, range(60000, 70000))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=BATCH_SIZE,
                                               collate_fn=ds.collate_fn)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=BATCH_SIZE,
                                              collate_fn=ds.collate_fn)

    model = CNN()
    optimizer = optim.SGD(model.parameters(),
                          lr=LEARNING_RATE,
                          momentum=MOMENTUM)

    for epoch in range(EPOCHS):
        print("Starting Training Epoch {}".format(epoch))
        train(model, train_loader, optimizer)
        print("Training Epoch {} finished\n".format(epoch))
        test(model, test_loader)

    # Sanity check: inspect the model's outputs on one batch
    for batch in test_loader:
        print("\nNamed Labels:", dataset.get_text(batch["named_labels"]))
        print("\nLabels:", batch["labels"])

        data = batch["data"]
        data = torch.unsqueeze(data, 1)  # add a channel dimension for the CNN

        output = model(data)
        pred = output.data.max(1)[1]  # index of the max log-probability per sample
        print("\nPredictions:", pred)
        break
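
CNN, train, and test in the PyTorch examples (this one and Example 5) are likewise defined elsewhere. A minimal sketch under the same assumptions (28x28 grayscale inputs, 10 classes, batches keyed by "data" and "labels"):

import torch
import torch.nn as nn
import torch.nn.functional as F

# Hypothetical sketches of the model and loops referenced above
class CNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 32, kernel_size=3)
        self.fc1 = nn.Linear(32 * 13 * 13, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv(x)), 2)  # 28x28 -> 26x26 -> 13x13
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        return F.log_softmax(self.fc2(x), dim=1)

def train(model, loader, optimizer):
    model.train()
    for batch in loader:
        data = torch.unsqueeze(batch["data"], 1).float()  # add channel dim
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), batch["labels"].long())
        loss.backward()
        optimizer.step()

def test(model, loader):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for batch in loader:
            data = torch.unsqueeze(batch["data"], 1).float()
            pred = model(data).argmax(dim=1)
            correct += (pred == batch["labels"].long()).sum().item()
            total += len(pred)
    print("Test accuracy: {:.4f}".format(correct / total))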
Example 4
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from hub import dataset


def main():
    BATCH_SIZE = 64
    EPOCHS = 3

    optimizer = Adam()
    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
    loss_fn = SparseCategoricalCrossentropy()

    # Load data
    ds = dataset.load("mnist/fashion-mnist")

    # Transform into a TensorFlow dataset
    # max_text_len is an optional argument that sets the maximum length of text labels; the default is 30
    ds = ds.to_tensorflow(max_text_len=15)

    # Split back into the original train and test sets
    train_dataset = ds.take(60000)
    test_dataset = ds.skip(60000)

    train_dataset = train_dataset.batch(BATCH_SIZE)
    test_dataset = test_dataset.batch(BATCH_SIZE)

    model = create_CNN()
    # model.summary()

    for epoch in range(EPOCHS):
        print(f"\nStarting Training Epoch {epoch}")
        train(model, train_dataset, optimizer, loss_fn, train_acc_metric)
        print(f"Training Epoch {epoch} finished\n")
        test(model, test_dataset, test_acc_metric)

    # Sanity check: inspect the model's outputs on one batch
    for batch in test_dataset:
        print("\nNamed Labels:", dataset.get_text(batch["named_labels"]))
        print("\nLabels:", batch["labels"])

        # Add a channel dimension before feeding the batch to the model
        output = model(tf.expand_dims(batch["data"], axis=3), training=False)
        print(type(output))
        pred = np.argmax(output, axis=-1)  # predicted class per sample
        print("\nPredictions:", pred)
        break
Example 5
import torch
import torch.optim as optim
from hub import dataset


def main():
    EPOCHS = 3
    BATCH_SIZE = 64
    LEARNING_RATE = 0.01
    MOMENTUM = 0.5
    torch.backends.cudnn.enabled = False
    random_seed = 2
    torch.manual_seed(random_seed)

    # Load data
    ds = dataset.load("abhinavtuli/fashion-mnist")

    # Transform into a PyTorch dataset
    ds = ds.to_pytorch()

    # Split back into the original train and test sets instead of using a random split
    train_dataset = torch.utils.data.Subset(ds, range(60000))
    test_dataset = torch.utils.data.Subset(ds, range(60000, 70000))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=BATCH_SIZE,
                                               collate_fn=ds.collate_fn)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=BATCH_SIZE,
                                              collate_fn=ds.collate_fn)

    model = CNN()
    optimizer = optim.SGD(model.parameters(),
                          lr=LEARNING_RATE,
                          momentum=MOMENTUM)

    for epoch in range(EPOCHS):
        print("Starting Training Epoch {}".format(epoch))
        train(model, train_loader, optimizer)
        print("Training Epoch {} finished\n".format(epoch))
        test(model, test_loader)
Example 6
from hub import dataset

# Load data
ds = dataset.load("arenbeglaryan/vocsegmentation")

# Transform into a TensorFlow dataset and batch it
ds = ds.to_tensorflow().batch(8)

# Iterate over the data
for batch in ds:
    print(batch["data"], batch["labels"])
Example 7
File: load_tf.py Project: 40a/Hub-1
from hub import dataset

# Load data
ds = dataset.load("mnist/mnist")

# Transform into a TensorFlow dataset
ds = ds.to_tensorflow().batch(8)

# Iterate over the data
for batch in ds:
    print(batch["data"], batch["labels"])
Example 8
import torch
from hub import dataset

# Load data
ds = dataset.load("abhinav/aerial-omdena")

# Transform into a PyTorch dataset and wrap it in a DataLoader
ds = ds.to_pytorch()
loader = torch.utils.data.DataLoader(
    ds, batch_size=2, collate_fn=ds.collate_fn
)

# Iterate over the data
for batch in loader:
    print(batch["image_lat"])
    print(batch["image_lon"])
    print(batch["cluster_lat"])
    print(batch["cluster_lon"])
    print(batch["cons_pc"])
    print(batch["nightlights"])
    print(batch["nightlights_bin"])
    print(batch["image"])