Code Example #1
import numpy as np
import joblib
from sklearn import metrics
# (ImageClassifier is provided by the project's own classifier module.)


def trainModel(robot):
    classifier = ImageClassifier()

    (train_raw, train_labels) = classifier.load_data_from_folder('./photos/')
    (test_raw, test_labels) = classifier.load_data_from_folder('./test/')

    # convert images into features
    train_data = np.array(classifier.extract_image_features(train_raw))
    test_data = np.array(classifier.extract_image_features(test_raw))

    print('entering train classifier')
    # train model and test on training data
    classifier.train_classifier(train_data, train_labels)
    predicted_labels_for_train = classifier.predict_labels(train_data)
    train_accuracy = metrics.accuracy_score(train_labels, predicted_labels_for_train)

    print('entering test classifier')

    predicted_labels_for_test = classifier.predict_labels(test_data)
    test_accuracy = metrics.accuracy_score(test_labels, predicted_labels_for_test)
    print("Training results")
    print("=============================")
    print("Confusion Matrix:\n",metrics.confusion_matrix(train_labels, predicted_labels_for_train))
    print("Accuracy: ", train_accuracy)
    print("F1 score: ", metrics.f1_score(train_labels, predicted_labels_for_train, average='micro'))

    # test model
    print("\Testing results")
    print("=============================")
    print("Confusion Matrix:\n",metrics.confusion_matrix(test_labels, predicted_labels_for_test))
    print ("Test Labels: ", test_labels)
    print("Accuracy: ", test_accuracy)
    print("F1 score: ", metrics.f1_score(test_labels, predicted_labels_for_test, average='micro'))
    print("\n")
    if test_accuracy == 1.0:
        print('''
        ||~~~~~~~~~~~~~~~~~~
        ||~~~~~~~~~~~~~~~~~~
        ||~~~~~~VICTORY~~~~~
        ||~~~~~~~~~~~~~~~~~~
        ||~~~~~~~~~~~~~~~~~~
        ||
        ||
        ||
        ||
        ||
                ''')
    joblib.dump(classifier, 'classifier.pkl')
    return classifier
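The joblib.dump call above makes the trained model reusable in later sessions. As a minimal sketch (using only methods that already appear in this example), the pickled classifier could be restored and re-checked against the held-out folder like this:

import numpy as np
import joblib
from sklearn import metrics

# Restore the classifier serialized by trainModel() above.
classifier = joblib.load('classifier.pkl')

# Re-run it on the same held-out folder used during training.
(test_raw, test_labels) = classifier.load_data_from_folder('./test/')
test_data = np.array(classifier.extract_image_features(test_raw))
predicted = classifier.predict_labels(test_data)
print("Reloaded accuracy:", metrics.accuracy_score(test_labels, predicted))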
Code Example #2
def main():
    """
    Set up arguments to be used for inference
    """
    parser = argparse.ArgumentParser(
        description='Predicts the most likely classes of an image from a saved model checkpoint.')
    parser.add_argument('image_path', type=str, help='Image file')
    parser.add_argument('checkpoint', type=str, help='Checkpoint file')
    parser.add_argument('--top_k',
                        dest='top_k',
                        metavar='T',
                        type=int,
                        default=1,
                        help='Top K most likely classes')
    parser.add_argument('--category_names',
                        dest='category_names',
                        metavar='C',
                        type=str,
                        default='cat_to_name.json',
                        help='Mapping of categories to real names')
    parser.add_argument('--gpu',
                        dest='gpu',
                        type=bool,
                        nargs='?',
                        default=False,
                        const=True,
                        help='Use GPU for inference')

    args = parser.parse_args()
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'
                          ) if args.gpu else 'cpu'

    # load category mapping
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)

    # load the model
    model = ImageClassifier(device)
    model.load(args.checkpoint)

    # predict
    predictions = model.predict(process_image(args.image_path), args.top_k,
                                cat_to_name)

    for name, prob in predictions:
        print(f'{name}: {prob:.2f}%')
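One caveat in the snippet above: the two branches of the device expression produce different types (a torch.device versus the plain string 'cpu'). Both are accepted by PyTorch's .to(), but a uniform version would be the one-liner below (a sketch, not part of the original project):

# Use the GPU only when requested and available; always produce a torch.device.
device = torch.device('cuda:0' if args.gpu and torch.cuda.is_available() else 'cpu')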
Code Example #3
File: service.py  Project: Mixser/pytorch-example
    async def process_image(self, image_bytes):
        loop = asyncio.get_running_loop()

        classifier = ImageClassifier(self.model, self.classes_map)

        # run in the default executor (a thread pool, so still subject to the GIL);
        # in the future this could be swapped for a process-based executor
        result = await loop.run_in_executor(None, classifier.classify,
                                            image_bytes)

        prob, class_ = result

        if not class_:
            return {'result': None}

        return {'result': class_[1]}
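The comment in this handler already anticipates swapping the executor. Passing None to run_in_executor uses the event loop's default thread pool, so CPU-bound classification still contends for the GIL; as a sketch, the relevant lines of process_image could hand the work to a process pool instead (the executor object and worker count are illustrative assumptions, and classifier.classify plus its arguments must be picklable for this to work):

from concurrent.futures import ProcessPoolExecutor

# A dedicated process pool sidesteps the GIL for CPU-bound classification.
executor = ProcessPoolExecutor(max_workers=2)
result = await loop.run_in_executor(executor, classifier.classify, image_bytes)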
Code Example #4
File: main.py  Project: totoropy/numbers_recognition
from classifier import ImageClassifier

model_name = '0025_RO4F'

# train a model
ic = ImageClassifier()
ic.set_classes(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
ic.set_model_name(model_name)
ic.set_train_data_path('data/{}'.format(model_name))
ic.train(2000)

# use the model to classify images from a different folder
ic.predict('data/{}/predict/'.format(model_name))
ic.print_results()

# create an html page with a graphic report
ic.print_html_report()
Code Example #5
File: train.py  Project: uzmafiza24/image_classifier
def main():
    """
    Set up arguments to be used in the training
    """
    parser = argparse.ArgumentParser(
        description='Trains a new network on a dataset and saves the model as a checkpoint.')
    parser.add_argument('data_dir', type=str, help='Dataset directory')
    parser.add_argument('--save_dir',
                        dest='save_dir',
                        metavar='S',
                        type=str,
                        help='Checkpoint directory')
    parser.add_argument('--arch',
                        dest='arch',
                        type=str,
                        default='vgg16',
                        choices=[
                            'resnet18', 'resnet34', 'alexnet', 'vgg11',
                            'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16',
                            'vgg16_bn', 'vgg19', 'vgg19_bn'
                        ],
                        help='Network architecture')
    parser.add_argument('--learning_rate',
                        dest='learning_rate',
                        metavar='LR',
                        type=float,
                        default=0.001,
                        help='Learning rate')
    parser.add_argument('--hidden_units',
                        dest='hidden_units',
                        metavar='H',
                        type=int,
                        default=256,
                        help='Number of neurons in the hidden layer')
    parser.add_argument(
        '--dropout',
        dest='dropout',
        metavar='D',
        type=float,
        default=0.2,
        help=
        'Dropout for the hidden layers (one less than the number of hidden units)'
    )
    parser.add_argument('--epochs',
                        dest='epochs',
                        metavar='E',
                        type=int,
                        default=25,
                        help='Number of epochs to run the training for')
    parser.add_argument('--n_classes',
                        dest='n_classes',
                        metavar='N',
                        type=int,
                        default=102,
                        help='Number of classes to classify')
    parser.add_argument('--gpu',
                        dest='gpu',
                        type=bool,
                        nargs='?',
                        default=False,
                        const=True,
                        help='Use GPU for training')

    args = parser.parse_args()
    args.save_dir = args.save_dir if args.save_dir else args.data_dir
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'
                          ) if args.gpu else 'cpu'

    # create the model
    model = ImageClassifier(device)
    model.compile(args.arch, args.hidden_units, args.dropout, args.n_classes,
                  args.learning_rate)

    # Load data
    dataloaders, image_datasets = DataProcessor(args.data_dir).create_loaders()

    model.train(dataloaders, args.epochs, image_datasets)
    model.save(f'{args.arch}_checkpoint.pth')
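Note that args.save_dir is resolved above but never actually used: the checkpoint lands in the current working directory. Assuming model.save accepts a full path (the example only ever passes a bare filename, so this is an assumption), the save directory could be honored like this:

import os

# Write the checkpoint into the requested save directory rather than the CWD.
model.save(os.path.join(args.save_dir, f'{args.arch}_checkpoint.pth'))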
Code Example #6
def train(args=None):

    # parse command-line arguments when none are passed in explicitly
    if args is None:
        args = parse_arguments()
    EPOCHS = 100
    INIT_LR = 1e-3
    BS = 32
    IMAGE_DIMS = (96, 96, 3)

    data = []
    labels = []

    print("[INFO] loading images...")
    imagePaths = sorted(list(paths.list_images(args.dataset)))
    random.seed(42)
    random.shuffle(imagePaths)
    # loop over the input images
    for imagePath in imagePaths:
        # load the image, pre-process it, and store it in the data list
        try:
            image = cv2.imread(imagePath)
            image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
            image = img_to_array(image)
            data.append(image)

            # extract the class label from the image path and update the
            # labels list
            label = imagePath.split(os.path.sep)[-2]
            labels.append(label)
        except Exception:
            # skip files that cannot be read or resized as images
            pass

    data = np.array(data, dtype="float") / 255.0
    labels = np.array(labels)

    # binarize the labels
    lb = LabelBinarizer()
    labels = lb.fit_transform(labels)

    # partition the data into training and testing splits using 80% of
    # the data for training and the remaining 20% for testing
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.2,
                                                      random_state=42)

    aug = ImageDataGenerator(rotation_range=25,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")

    # initialize the model
    print("[INFO] compiling model...")
    model = ImageClassifier(IMAGE_DIMS[1], IMAGE_DIMS[0],
                            channels=IMAGE_DIMS[2],
                            classes=len(lb.classes_)).build()
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
                            validation_data=(testX, testY),
                            steps_per_epoch=len(trainX) // BS,
                            epochs=EPOCHS,
                            verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args.model)

    # save the label binarizer to disk
    print("[INFO] serializing label binarizer...")
    f = open(args.labelbin, "wb")
    f.write(pickle.dumps(lb))
    f.close()

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="upper left")
    plt.savefig(args["plot"])