Example 1
import torch


def train(X, y, epochs=10, batch_size=16):
    dataset = IrisDataset(X, y)
    num_examples = len(dataset)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         shuffle=True)
    # Infer the input dimension from the feature matrix.
    model = IrisNet(input_dim=X.shape[1])
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(1, epochs + 1):
        num_correct = 0
        for inputs, labels in loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            num_correct += (labels == outputs.argmax(1)).sum().item()
            loss.backward()
            optimizer.step()
        accuracy = 100.0 * num_correct / num_examples
        print(f"Finished epoch {epoch}, accuracy: {accuracy:.2f}%")

    return model
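
For context, a minimal sketch of the IrisDataset and IrisNet classes the snippet above assumes (hypothetical definitions; the originals are not shown here):

import torch

class IrisDataset(torch.utils.data.Dataset):
    # Hypothetical minimal dataset wrapping feature/label arrays.
    def __init__(self, X, y):
        self.X = torch.as_tensor(X, dtype=torch.float32)
        self.y = torch.as_tensor(y, dtype=torch.long)

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]

class IrisNet(torch.nn.Module):
    # Hypothetical two-layer classifier; 3 output classes for Iris.
    def __init__(self, input_dim, hidden=16, num_classes=3):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(input_dim, hidden),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden, num_classes),
        )

    def forward(self, x):
        return self.net(x)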
Example 2
from bokeh.io import curdoc


def _main():
    dataset = IrisDataset()
    plot = Plot(dataset)
    for root in plot.roots:
        curdoc().add_root(root)

    curdoc().title = dataset.name
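
This is a Bokeh server entry point: curdoc() only takes effect when the script runs under bokeh serve. A minimal sketch of the Plot wrapper it assumes (hypothetical; the real class is not shown):

from bokeh.plotting import figure

class Plot:
    # Hypothetical wrapper: one scatter figure, exposed via a roots list
    # so the caller can attach each root to the current document.
    def __init__(self, dataset):
        fig = figure(title=dataset.name)
        fig.scatter(dataset.data[:, 0], dataset.data[:, 1])
        self.roots = [fig]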
Example 3
    def test(cls):
        from dataset import IrisDataset
        from sklearn.model_selection import train_test_split

        iris_dataset = IrisDataset()
        x_train, x_test, y_train, y_test = train_test_split(
            iris_dataset.data[:100], iris_dataset.target[:100])

        svm = cls(kernel="linear", max_iteration=200)
        print(svm.train(x_train, y_train))
        print(svm.score(x_test, y_test))
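
For a quick sanity check of the same flow without the custom class, the scikit-learn equivalent looks like this (a sketch; SVC is a stand-in, not the project's implementation, and its iteration cap is spelled max_iter rather than max_iteration):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC

iris = load_iris()
# The [:100] slice keeps only the first two (linearly separable) classes.
x_train, x_test, y_train, y_test = train_test_split(iris.data[:100],
                                                    iris.target[:100])

svm = SVC(kernel="linear", max_iter=200)
svm.fit(x_train, y_train)
print(svm.score(x_test, y_test))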
Example 4
    else:
        device = torch.device("cpu")

    # device = torch.device("cpu")  # cuDNN errors out; run on the CPU instead
    model = model_dict[args.model]
    model = model.to(device)
    filename = args.load
    if not os.path.exists(filename):
        print(f"model path not found: {filename}")
        exit(1)

    # map_location lets a GPU-trained checkpoint load on the CPU as well.
    model.load_state_dict(torch.load(filename, map_location=device))
    model.eval()

    test_set = IrisDataset(filepath='Semantic_Segmentation_Dataset/',
                           split='test',
                           transform=transform)

    testloader = DataLoader(test_set,
                            batch_size=args.bs,
                            shuffle=False,
                            num_workers=0)
    # On Windows set num_workers to 0; the original code used 2.
    counter = 0

    os.makedirs('test/labels/', exist_ok=True)
    os.makedirs('test/output/', exist_ok=True)
    os.makedirs('test/mask/', exist_ok=True)

    with torch.no_grad():
        for i, batchdata in tqdm(enumerate(testloader), total=len(testloader)):
            img, labels, index, x, y = batchdata
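
The excerpt ends after unpacking the batch. For reference, a self-contained sketch of the usual no-grad evaluation pattern (generic, not this project's actual loop; the batch layout is simplified):

import torch

def evaluate(model, loader, device):
    # Generic segmentation-style evaluation: forward pass under no_grad,
    # per-pixel argmax, predictions collected on the CPU.
    model.eval()
    preds = []
    with torch.no_grad():
        for img, *_ in loader:
            output = model(img.to(device))            # (B, C, H, W) logits
            preds.append(output.argmax(dim=1).cpu())  # (B, H, W) class ids
    return torch.cat(preds)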
Example 5
#!/usr/bin/python
# -*- coding: utf-8 -*-

from math import exp, log2

import numpy as np
from dataset import IrisDataset

IRIS_DATASET = IrisDataset()


class LogisticRegressionClassifier:
    def __init__(self, learning_rate=0.01, max_iteration=200):
        self._learning_rate = learning_rate
        self._max_iteration = max_iteration
        self.weights = None
        self.classes = None

    @property
    def learning_rate(self):
        return self._learning_rate

    @learning_rate.setter
    def learning_rate(self, learning_rate):
        self._learning_rate = learning_rate

    @property
    def max_iteration(self):
        return self._max_iteration

    @max_iteration.setter
    def max_iteration(self, max_iteration):
        self._max_iteration = max_iteration
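
A minimal usage sketch for the property plumbing above (the training methods fall outside this excerpt, so only construction and hyperparameter access are exercised):

clf = LogisticRegressionClassifier()
print(clf.learning_rate)   # 0.01
clf.learning_rate = 0.05   # routed through the property setter
clf.max_iteration = 500
print(clf.max_iteration)   # 500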
Example 6
        logger.write_summary(str(model.parameters))
    except Exception:
        print("Torch summary not found!")

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           'min',
                                                           patience=5)

    criterion = CrossEntropyLoss2d()
    criterion_DICE = GeneralizedDiceLoss(softmax=True, reduction=True)
    criterion_SL = SurfaceLoss()

    Path2file = args.dataset
    train = IrisDataset(filepath=Path2file,
                        split='train',
                        transform=transform,
                        **kwargs)

    valid = IrisDataset(filepath=Path2file,
                        split='validation',
                        transform=transform,
                        **kwargs)

    trainloader = DataLoader(train,
                             batch_size=args.bs,
                             shuffle=True,
                             num_workers=args.workers)

    validloader = DataLoader(valid,
                             batch_size=args.bs,
                             shuffle=False,