Example #1
def create_discriminator():
    """
    Makes the discriminator network
    :return: The discriminator network
    """
    # Use SqueezeNet because it is fairly resource-light, and GANs are already hard to train
    disc_net = SqueezeNet(input_width=input_size[0], classes=2)

    # Compile with loss
    disc_net.compile(loss="binary_crossentropy",
                     optimizer=create_adam(),
                     metrics=["accuracy"])

    return disc_net
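
The discriminator above is compiled so it can be trained on its own; in a typical Keras GAN setup it is additionally frozen and stacked behind the generator so the generator can be trained through it. A minimal sketch of that wiring (create_gan, generator, and latent_dim are illustrative names, not part of the example; only create_adam() comes from the snippet above):

from keras.layers import Input
from keras.models import Model

def create_gan(generator, discriminator, latent_dim):
    # Freeze the discriminator so only the generator is updated
    # when the stacked model is trained
    discriminator.trainable = False
    z = Input(shape=(latent_dim,))
    validity = discriminator(generator(z))
    gan = Model(z, validity)
    gan.compile(loss="binary_crossentropy", optimizer=create_adam())
    return gan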
Example #2
def train(img_local_path, label_path, model_object_key):
    model = SqueezeNet(weights='imagenet')
    img = image.load_img(img_local_path, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    # Read the integer class label expected by sparse_categorical_crossentropy
    with open(label_path) as label_file:
        y = np.array([int(label_file.read())])

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    history = model.fit(x, y)
    model.summary()
    model.save_weights(tmp_path + model_object_key)
    return history.history
Example #3
def main():
    pl_train, pl_labels = get_dataset('./Pan_Licence/')
    pl_labels = to_categorical(pl_labels, num_classes=36)

    x_train, x_val, y_train, y_val = train_test_split(pl_train,
                                                      pl_labels,
                                                      test_size=0.2,
                                                      random_state=2064)

    tb = TensorBoard(log_dir='./logs/Squeezenet', write_graph=True)

    model = SqueezeNet()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    print(model.summary())

    history = model.fit(x_train,
                        y_train,
                        batch_size=32,
                        epochs=5,
                        validation_split=0.1,
                        shuffle=True,
                        callbacks=[tb])

    ## Save Model
    json_model = model.to_json()
    with open('model_squeezenet.json', 'w') as f:
        f.write(json_model)
    model.save_weights('model_squeezenet.h5')
    print('Model Saved')

    print('Evaluating Model')
    predict = model.evaluate(x=x_val, y=y_val, batch_size=1)

    print('Score', predict[1] * 100.00)
    print('Loss', predict[0])
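
The architecture and weights saved above can later be restored with the standard Keras helpers. A short sketch, not part of the original example (if this SqueezeNet implementation uses custom layers, they would have to be passed via custom_objects):

from keras.models import model_from_json

with open('model_squeezenet.json') as f:
    model = model_from_json(f.read())
model.load_weights('model_squeezenet.h5')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])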
Example #4
class DNNModel:
    def __init__(self, image_path):
        self.IMAGE_SIZE = 64
        self.data = []
        self.labels = []
        self.model = self.build_model()

        if image_path is not None:
            self.image_path = image_path
        else:
            self.image_path = "/home/madi/deeplearning/raspberry-pi/datasets"
        pass

    def gen_training_image_set(self):
        imagePaths = os.listdir(self.image_path)
        # loop over the input images
        for imagePath in imagePaths:
            # load the image, pre-process it, and store it in the data list
            imagePath = self.image_path + "/" + imagePath
            print(imagePath)
            image = cv2.imread(imagePath)
            image = cv2.resize(image, (self.IMAGE_SIZE, self.IMAGE_SIZE))
            image = img_to_array(image)

            self.data.append(image)

            # extract the class label from the image path and update the
            # labels list
            if "left" in imagePath.split(os.path.sep)[-2]:
                label = 1
            elif "right" in imagePath.split(os.path.sep)[-2]:
                label = 2
            else:
                label = 0

            self.labels.append(label)

        # scale the raw pixel intensities to the range [0, 1]
        self.data = np.array(self.data, dtype="float") / 255.0
        self.labels = np.array(self.labels)

    def add_training_sample(self, data, label):
        image = cv2.resize(data, (self.IMAGE_SIZE, self.IMAGE_SIZE))
        image = img_to_array(image)
        self.data.append(image)
        self.labels.append(label)

    def scale_and_norm_training_samples(self):
        # scale the raw pixel intensities to the range [0, 1]
        self.data = np.array(self.data, dtype="float") / 255.0
        self.labels = np.array(self.labels)

    def build_model(self):
        self.model = SqueezeNet(include_top=True,
                                weights=None,
                                classes=3,
                                input_shape=(self.IMAGE_SIZE, self.IMAGE_SIZE,
                                             3))
        self.model.summary()
        opt = Adam()
        self.model.compile(loss="binary_crossentropy",
                           optimizer=opt,
                           metrics=["accuracy"])
        return self.model

    def train(self):
        # split train and test set
        (trainX, testX, trainY, testY) = train_test_split(self.data,
                                                          self.labels,
                                                          test_size=0.25,
                                                          random_state=42)

        # convert the labels from integers to vectors
        trainY = to_categorical(trainY, num_classes=3)
        testY = to_categorical(testY, num_classes=3)

        print(trainX.shape)
        print(trainY.shape)
        self.model.fit(trainX,
                       trainY,
                       batch_size=1,
                       epochs=50,
                       verbose=1,
                       validation_data=(testX, testY))
        self.test()
        pass

    def predict(self, img_frame):
        img_frame = cv2.resize(img_frame, (self.IMAGE_SIZE, self.IMAGE_SIZE))
        img_frame = img_to_array(img_frame)
        data = np.array([img_frame])
        # scale the raw pixel intensities to the range [0, 1]
        data = np.array(data, dtype="float") / 255.0

        ret = self.model.predict(data)

        if len(ret) > 0:
            return ret[0]
        pass

    def save_model(self):
        self.model.save("greenball_squeezenet_local.h5")
        pass

    def load_model(self, path):
        self.model = load_model(path)
        pass

    def test(self):
        cnt = 0
        for i in range(len(self.data)):
            # predict() expects a batch, so add a leading batch dimension
            ret = self.model.predict(np.expand_dims(self.data[i], axis=0))
            pred = np.argmax(ret)
            if pred == self.labels[i]:
                cnt += 1
        print("total correct number is %d" % cnt)
Example #5
# images, classes = zip(*images_classes)
# [images, classes], [x, y] = mnist.load_data()
# images = images[0:500]
# classes = classes[0:500]

# images = np.array([cv2.resize(cv2.cvtColor(im, cv2.COLOR_GRAY2RGB), (227, 227)) for im in images])
# images = np.array(images)
# print images.shape
# classes = to_categorical(classes, nb_classes=nr_classes)

print('Loading model..')
model = SqueezeNet(nb_classes, input_shape=input_shape)
adam = Adam(lr=0.040)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss="categorical_crossentropy",
              optimizer='adam',
              metrics=['accuracy'])
if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)

print('Fitting model')
# model.fit(images, classes, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_split=0.2, initial_epoch=0)
model.fit_generator(training_data,
                    samples_per_epoch=samples_per_epoch,
                    validation_data=validation_data,
                    nb_val_samples=nb_val_samples,
                    nb_epoch=nb_epoch,
                    verbose=1,
                    initial_epoch=initial_epoch)
print("Finished fitting model")
Example #6
def soft_logloss(y_true, y_pred):
    # Soft-target log loss: compare the temperature-softened teacher
    # distribution with the student's soft predictions
    logits = y_true[:, 256:]
    y_soft = K.softmax(logits / temperature)
    y_pred_soft = y_pred[:, 256:]
    return logloss(y_soft, y_pred_soft)
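

# The compile call below references knowledge_distillation_loss(), which is
# not shown in this snippet. A minimal sketch of what it presumably looks
# like, assuming (as the slicing in soft_logloss suggests) that the first 256
# columns hold the hard targets / hard predictions and the remaining columns
# hold the teacher logits / soft predictions; the weighting is an assumption:
def knowledge_distillation_loss(y_true, y_pred, lambda_const, temperature):
    y_true_hard, teacher_logits = y_true[:, :256], y_true[:, 256:]
    y_pred_hard, y_pred_soft = y_pred[:, :256], y_pred[:, 256:]
    y_soft = K.softmax(teacher_logits / temperature)
    # weighted sum of the usual hard-label loss and the distillation term
    return (lambda_const * logloss(y_true_hard, y_pred_hard)
            + (1.0 - lambda_const) * logloss(y_soft, y_pred_soft))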


# # Train

# In[17]:

# lambda_const = 0.2

model.compile(
    optimizer=optimizers.SGD(lr=learning_rate,
                             momentum=momentum,
                             nesterov=True),
    #     optimizer=optimizers.Adam(lr=0.005, decay=0.01),
    loss=lambda y_true, y_pred: knowledge_distillation_loss(
        y_true, y_pred, lambda_const, temperature),
    metrics=[accuracy, top_5_accuracy, categorical_crossentropy, soft_logloss])

# In[18]:

callbacks = [
    EarlyStopping(monitor='val_accuracy', patience=4, min_delta=0.01),
    ReduceLROnPlateau(monitor='val_accuracy',
                      factor=0.1,
                      patience=2,
                      epsilon=0.007)
]

# log progress to AML workspace
Example #7
if transfer_learning:
    trainable = False
else:
    trainable = True

model = SqueezeNet(weight_decay=weight_decay, image_size=299, trainable=trainable)
model.count_params()

# # Training

# In[6]:

model.compile(
    #     optimizer=optimizers.Adam(lr=0.005, decay=0.01),
    optimizer=optimizers.SGD(lr=learning_rate,
                             momentum=momentum,
                             nesterov=True),
    loss='categorical_crossentropy',
    metrics=['accuracy', 'top_k_categorical_accuracy'])

# In[ ]:

callbacks = [
    EarlyStopping(monitor='val_acc', patience=4, min_delta=0.01),
    ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=2, epsilon=0.007)
]

# log progress to AML workspace
if remote_execution:

    class LogRunMetrics(Callback):
Example #8

# List comprehensions. Create a list of two tuples with (images, class).
print('Loading images..')
paths = [
    os.path.join(subdir, f) for subdir, dirs, files in os.walk(images_dir)
    for f in files if f.endswith('.jpg')
]

images = [load_image(path) for path in paths]
nr_classes = len(decode)
images = np.array(images)

print('Loading model..')
model = SqueezeNet(nr_classes)
model.compile(loss="categorical_crossentropy", optimizer="adam")
if os.path.isfile(weights_file):
    print('Loading weights...')
    model.load_weights(weights_file)

print("Classifying images...")
# predictions = model.predict(images, batch_size=100, verbose=1)
# print('Predicted %s images' % len(predictions))
for i in range(len(images)):
    img = np.expand_dims(images[i], axis=0)
    res = model.predict(img)
    results = res[0].argsort()[-5:][::-1]
    print('%s: ' % paths[i])
    for j in range(len(results)):
        result = decode[results[j]]
        text = '%.3f: %s' % (res[0][results[j]], result)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-dir', default='data/train')
    parser.add_argument('--test-dir', default='data/test')
    parser.add_argument('--logdir', default='logs')
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--epochs', type=int, required=True)
    parser.add_argument('--num-classes', type=int, required=True)
    parser.add_argument('--checkpoint-pattern',
                        default='weights-{epoch:d}-{val_acc:.4f}.hdf5')
    parser.add_argument('--learning-rate', type=float, default=1e-4)
    parser.add_argument('--restore')
    args = parser.parse_args()

    # count samples
    train_files = count_files(args.train_dir, '.png')
    print('Found %d train files.' % train_files)
    test_files = count_files(args.test_dir, '.png')
    print('Found %d test files.' % test_files)

    if args.restore:
        model = SqueezeNet(weights=None, classes=args.num_classes)
        model.load_weights(args.restore)
    else:
        model = SqueezeNet(weights='imagenet', classes=args.num_classes)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=args.learning_rate),
                  metrics=['accuracy'])

    train_datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        preprocessing_function=preprocess_single)

    test_datagen = ImageDataGenerator(preprocessing_function=preprocess_single)

    train_generator = train_datagen.flow_from_directory(
        args.train_dir,
        target_size=(SIZE, SIZE),
        batch_size=args.batch_size,
        class_mode='categorical')

    test_generator = test_datagen.flow_from_directory(
        args.test_dir,
        target_size=(SIZE, SIZE),
        batch_size=args.batch_size,
        class_mode='categorical')

    checkpoint = ModelCheckpoint(args.checkpoint_pattern,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    tensorboard = TensorBoard(log_dir=args.logdir,
                              histogram_freq=0,
                              batch_size=args.batch_size,
                              write_graph=True,
                              write_grads=False,
                              write_images=False,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    callbacks = [checkpoint, tensorboard]

    model.fit_generator(train_generator,
                        steps_per_epoch=(train_files // args.batch_size),
                        epochs=args.epochs,
                        validation_data=test_generator,
                        validation_steps=(test_files // args.batch_size),
                        callbacks=callbacks)