Example #1
from model import ClassifierModel  # assumed project import, matching Example #2


def get_predictions(sepal_length_cm, sepal_width_cm, petal_length_cm,
                    petal_width_cm):
    # Restore the pickled classifier and predict the iris class for a
    # single four-feature sample
    classifier = ClassifierModel()
    classifier.load_pickle()
    predicted_class = classifier.run_model(
        [[sepal_length_cm, sepal_width_cm, petal_length_cm, petal_width_cm]])
    return predicted_class[0]
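
A minimal usage sketch; the measurement values below are illustrative, not taken from the original snippet:

predicted = get_predictions(5.1, 3.5, 1.4, 0.2)
print(predicted)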
Example #2
import warnings
from os.path import dirname, join

from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB

from load_data import load_dataset
from model import ClassifierModel

warnings.filterwarnings("ignore")

if __name__ == '__main__':
    train_path = join(dirname(dirname(__file__)), "data", "train.xlsx")
    test_path = join(dirname(dirname(__file__)), "data", "test.xlsx")
    X_train, y_train = load_dataset(train_path)
    X_test, y_test = load_dataset(test_path)

    models = [
        ClassifierModel("Tfidf Bigram", TfidfVectorizer(ngram_range=(1, 2))),
        ClassifierModel("Tfidf Trigram", TfidfVectorizer(ngram_range=(1, 3))),
        ClassifierModel("Count Bigram", CountVectorizer(ngram_range=(1, 2))),
        ClassifierModel("Count Trigram", CountVectorizer(ngram_range=(1, 3)))
    ]

    for n in [2000, 5000, 10000, 15000, 20000]:
        model = ClassifierModel("Count Max Feature {}".format(n),
                                CountVectorizer(max_features=n))
        models.append(model)

    for n in [2000, 5000, 10000, 15000, 20000]:
        model = ClassifierModel("Tfidf Max Feature {}".format(n),
                                TfidfVectorizer(max_features=n))
        models.append(model)
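
ClassifierModel's internals are not shown here, but MultinomialNB is imported alongside the vectorizers, which suggests each model pairs its vectorizer with a Naive Bayes classifier. A plain scikit-learn sketch of one such entry, assuming that pairing:

from sklearn.pipeline import make_pipeline

pipeline = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)), MultinomialNB())
pipeline.fit(X_train, y_train)
print(pipeline.score(X_test, y_test))  # mean accuracy on the held-out split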
Example #3
import torch
import torchvision
import torchvision.transforms as transforms

from model import ClassifierModel  # assumed project module defining the network

# Batch size: how many images the loader yields per iteration
b_size = 4

# Standard CIFAR-10 preprocessing (assumed; the original relies on a
# `transform` defined elsewhere)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

testset = torchvision.datasets.CIFAR10(root='./data',
                                       train=False,
                                       download=True,
                                       transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=b_size,
                                         shuffle=False,
                                         num_workers=0)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Load trained model
net = ClassifierModel()
net.load_state_dict(torch.load('./cifar_classifier.pth'))
net.eval()  # inference mode: disables dropout and batch-norm updates

# Iterate over the test set and count, for each class, how many samples
# occur and how many are predicted correctly
correct_class = [0] * 10
total_class = [0] * 10
with torch.no_grad():
    for images, labels in testloader:
        # Forward pass on the current batch
        outputs = net(images)

        # The predicted class is the one with the maximum response
        pred_class = torch.argmax(outputs, 1)

        # Tally correct predictions per ground-truth class
        for label, pred in zip(labels.tolist(), pred_class.tolist()):
            total_class[label] += 1
            if label == pred:
                correct_class[label] += 1
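
With the tallies filled in, per-class accuracy is a simple division; a short reporting sketch (the guard skips classes that never appear in the test split):

for i, name in enumerate(classes):
    if total_class[i] > 0:
        acc = 100.0 * correct_class[i] / total_class[i]
        print('Accuracy of {:5}: {:.1f} %'.format(name, acc))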
Example #4
import argparse

import tensorflow as tf

# Project-specific modules (assumed names, mirroring the earlier examples)
from data_loader import DataLoader
from model import ClassifierModel
from trainer import ClassifierTrainer


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--batch_size',
                        help='Batch size',
                        required=False,
                        type=int,
                        default=256)

    parser.add_argument('--pickle_file',
                        help='Pickle file',
                        required=False,
                        default='./traffic.pickle')

    parser.add_argument('--learning_rate',
                        help='Learning rate',
                        required=False,
                        type=float,
                        default=0.0001)

    parser.add_argument('--image_size',
                        help='Image size',
                        required=False,
                        type=int,
                        default=32)

    parser.add_argument('--num_classes',
                        help="Number of classes",
                        required=False,
                        type=int,
                        default=43)

    parser.add_argument('--color_channels',
                        help="Color channels",
                        required=False,
                        type=int,
                        default=1)

    parser.add_argument('--ckpt_dir',
                        help="Check point directory",
                        required=False,
                        default='./modelSave/')

    parser.add_argument('--num_iter_per_epoch',
                        help="Number of iterations per epoch",
                        required=False,
                        type=int,
                        default=1)

    parser.add_argument('--num_epochs',
                        help="Number of epochs",
                        required=False,
                        type=int,
                        default=1)

    # Note: type=bool would treat any non-empty string as True, so the
    # flag is parsed explicitly
    parser.add_argument('--load_model',
                        help="Load model or train model from scratch",
                        required=False,
                        type=lambda s: s.lower() in ('true', '1', 'yes'),
                        default=True)

    args = parser.parse_args()

    # Plain class used as a namespace for the parsed hyper-parameters
    class config:
        batch_size = args.batch_size
        pickle_file = args.pickle_file
        learning_rate = args.learning_rate
        image_size = args.image_size
        num_classes = args.num_classes
        num_channels = args.color_channels
        num_iter_per_epoch = args.num_iter_per_epoch
        num_epochs = args.num_epochs
        checkpoint_dir = args.ckpt_dir
        load_model = args.load_model

    # TensorFlow 1.x-style session, passed to the trainer below
    sess = tf.Session()

    data_loader = DataLoader(config=config)
    model = ClassifierModel(data_loader=data_loader, config=config)
    trainer = ClassifierTrainer(sess=sess,
                                model=model,
                                config=config,
                                logger=None,
                                dataLoader=data_loader)

    trainer.train()


if __name__ == '__main__':
    main()
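
Because every flag has a default, the script runs with no arguments; a typical invocation overriding a few of them (script name assumed):

python main.py --batch_size 128 --learning_rate 0.001 --num_epochs 10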