    image = np.rollaxis(image, 2, 0)
    x_valid.append(image)
    y_valid.append([1, 0])  # 0 = no, 1 = yes

for filename in glob.glob(valid_path_yes + '*'):
    image = cv2.imread(filename)
    image = np.asarray(image)
    image = cv2.resize(image, (224, 224))
    image = np.rollaxis(image, 2, 0)
    x_valid.append(image)
    y_valid.append([0, 1])  # 0 = no, 1 = yes

x_valid = np.array(x_valid)
y_valid = np.array(y_valid)

model = create_googlenet()

from keras.optimizers import SGD  #Adam
opt = SGD(lr=0.001)
model.compile(optimizer=opt,
              loss=keras.losses.categorical_crossentropy,
              metrics=['accuracy'])

#from keras.callbacks import ModelCheckpoint, EarlyStopping
#checkpoint = ModelCheckpoint("./models/googlenet_{epoch:02d}-{val_accuracy:.2f}.h5", monitor='val_acc', verbose=1, save_best_only=False, save_weights_only=False, mode='auto', period=5)
#early = EarlyStopping(monitor='val_acc', min_delta=0, patience=20, verbose=1, mode='auto')

datagen = ImageDataGenerator()
n_epochs = 100000

for e in range(n_epochs):
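    # The loop body is truncated in this snippet. A minimal sketch of manual
    # epoch training over ImageDataGenerator batches, assuming x_train and
    # y_train arrays exist alongside x_valid/y_valid (an assumption, not the
    # original code):
    print('Epoch', e + 1)
    batches = 0
    for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
        model.train_on_batch(x_batch, y_batch)
        batches += 1
        if batches >= len(x_train) // 32:
            # flow() loops indefinitely, so stop after roughly one pass
            break
    loss, acc = model.evaluate(x_valid, y_valid, verbose=0)
    print('val_loss: {:.4f}, val_acc: {:.4f}'.format(loss, acc))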
Example No. 2
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-d",
                    "--dataset",
                    required=True,
                    help="path to input dataset directory")

    args = vars(ap.parse_args())

    #dataset_path = args['dataset']
    dataset_directory = args['dataset']
    train_results_directory = os.path.join(
        dataset_directory,
        datetime.now().strftime("%d-%m-%Y_%H-%M"))
    os.mkdir(train_results_directory)
    print(train_results_directory)

    model_path = os.path.join(train_results_directory,
                              'parking_classification.model')
    plot_path = os.path.join(train_results_directory, 'plot.png')
    train_details_path = os.path.join(train_results_directory, 'details.json')
    csv_log_path = os.path.join(train_results_directory, 'epochs_log.csv')
    csv_lr_path = os.path.join(train_results_directory, 'learning_rates.csv')
    # with open(dataset_path, "r") as infile:
    #     data = json.load(infile)

    # initialize the model
    print("[INFO] compiling model...")
    # model = LeNet.build(width=70, height=70, depth=3, classes=2)
    #model, architecture_name = mAlexNet.build(width=IMAGE_HEIGHT, height=IMAGE_WIDTH, depth=IMAGE_CHANNEL, classes=NUM_CLASSES)
    model, architecture_name = create_googlenet(width=IMAGE_HEIGHT,
                                                height=IMAGE_WIDTH,
                                                depth=IMAGE_CHANNEL,
                                                classes=NUM_CLASSES)
    opt = Adam(lr=INIT_LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.05)
    model.compile(loss="categorical_crossentropy",  # matches the 2-class one-hot labels
                  optimizer=opt,
                  metrics=["accuracy"])

    image_gen = ImageDataGenerator(rescale=1. / 255)  #, rotation_range=45,
    #width_shift_range=0.1,
    #height_shift_range=0.1,
    #shear_range=0.01,
    #zoom_range=[0.9, 1.25],
    #horizontal_flip=True,
    #vertical_flip=True,
    #fill_mode='reflect')
    #data_format='channels_last',
    #brightness_range=[0.5, 1.5])
    image_gen_val = ImageDataGenerator(rescale=1. / 255)

    df_train = pd.read_csv(
        os.path.join(dataset_directory, 'data_paths_train.csv'))
    df_test = pd.read_csv(
        os.path.join(dataset_directory, 'data_paths_test.csv'))
    print(df_train)
    print(df_test)
    train_generator = image_gen.flow_from_dataframe(dataframe=df_train,
                                                    x_col="path",
                                                    y_col="y",
                                                    directory=None,
                                                    class_mode="categorical",
                                                    target_size=(IMAGE_HEIGHT,
                                                                 IMAGE_WIDTH),
                                                    batch_size=BATCH_SIZE)
    test_generator = image_gen_val.flow_from_dataframe(
        dataframe=df_test,
        x_col="path",
        y_col="y",
        class_mode="categorical",
        directory=None,
        target_size=(IMAGE_HEIGHT, IMAGE_WIDTH),  # Keras expects (height, width)
        batch_size=BATCH_SIZE)

    # train the network
    print("[INFO] training network...")
    # with open(dataset_path, "rb") as fp:  # Unpickling
    #     data = pickle.load(fp)
    # with open(dataset_path, "r") as infile:
    #     data = json.load(infile)

    model.summary()
    early_stop = EarlyStopping(monitor='val_loss', min_delta=0.05, patience=2)
    history = LossHistory()
    printlr = PrintLR()
    lrate = LearningRateScheduler(step_decay)
    callbacks = [
        CSVLogger(filename=csv_log_path, separator=',', append=False), printlr
    ]  #, history, lrate]
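    # Only CSVLogger and printlr are active; early_stop, history, and lrate
    # are instantiated above but left out of the callbacks list in this run.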
    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    STEP_SIZE_VALID = test_generator.n // test_generator.batch_size
    H = model.fit_generator(generator=train_generator,
                            validation_data=test_generator,
                            steps_per_epoch=STEP_SIZE_TRAIN,
                            validation_steps=STEP_SIZE_VALID,
                            epochs=EPOCHS,
                            verbose=1,
                            callbacks=callbacks)
    # save the model to disk
    print("[INFO] serializing network...")
    model.save(model_path)

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    print(H.history)
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy on Occupied/Empty")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(plot_path)

    details = {
        'epochs': str(EPOCHS),
        'batch_size': str(BATCH_SIZE),
        'image_size': "{}x{}".format(IMAGE_WIDTH, IMAGE_HEIGHT),
        'architecture': architecture_name,
        'augmented_data': 'True'
    }
    with open(train_details_path, 'w') as outfile:
        json.dump(details, outfile)

    with open(csv_lr_path, 'w', newline='') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        wr.writerow(learning_rates)
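
# The script references helpers defined elsewhere in the original file
# (learning_rates, step_decay, PrintLR, LossHistory). A minimal sketch of
# what they could look like; the halving schedule below is an illustrative
# assumption, not the original definition:
from keras import backend as K
from keras.callbacks import Callback

learning_rates = []

def step_decay(epoch):
    # Assumed schedule: halve the initial learning rate every 10 epochs.
    return INIT_LR * (0.5 ** (epoch // 10))

class PrintLR(Callback):
    def on_epoch_end(self, epoch, logs=None):
        # Record and print the optimizer's current learning rate.
        lr = float(K.get_value(self.model.optimizer.lr))
        learning_rates.append(lr)
        print("Epoch {}: lr = {:.6f}".format(epoch + 1, lr))

class LossHistory(Callback):
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_epoch_end(self, epoch, logs=None):
        self.losses.append(logs.get('loss'))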
Example No. 3
def preprocess_image(image):
    image = image.resize((224, 224))
    # image.show()
    image = img_to_array(image)
    image = imagenet_utils.preprocess_input(image)
    image = np.expand_dims(image, axis=0)
    return image


if __name__ == '__main__':
    images = []
    for index in range(1049, 1125):
        image = Image.open(f'tvsum_data/tvsum_data/frame{index}.jpg')
        image = preprocess_image(image)
        images.append(image)

    model = create_googlenet('googlenet_weights.h5')

    labels = np.loadtxt('synset_words.txt', str, delimiter='\t')

    outputs = [model.predict(image) for image in images]

    for output in outputs:
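        # The model is assumed to expose multiple outputs (e.g. auxiliary
        # classifiers plus features); index 3 is taken as the final softmax.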
        predicted_label = np.argmax(output[3])
        predicted_class_name = labels[predicted_label]
        print('Predicted Class: ', predicted_label, ', Class Name: ',
              predicted_class_name)

    # for index in range(3):
    #     print('Cosine similarity 1, 2: ', cosine_similarity(outputs[0][index + 1], outputs[1][index + 1]))
    #     print('Cosine similarity 1, 3: ', cosine_similarity(outputs[0][index + 1], outputs[2][index + 1]))
    #     print('Cosine similarity 1, 4: ', cosine_similarity(outputs[0][index + 1], outputs[3][index + 1]))
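
    # cosine_similarity, referenced in the commented block above, is not shown
    # in the snippet; a minimal numpy version it could correspond to (assumed):
    def cosine_similarity(a, b):
        a, b = np.ravel(a), np.ravel(b)
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))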
Example No. 4
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-d",
                    "--dataset",
                    required=True,
                    help="path to input dataset")

    args = vars(ap.parse_args())

    dataset_path = args['dataset']
    #train_results_directory = os.path.dirname(os.path.abspath(dataset_path))
    train_results_directory = os.path.join(
        dataset_path,
        datetime.now().strftime("%d-%m-%Y_%H-%M"))
    print(train_results_directory)
    model_path = os.path.join(train_results_directory,
                              'parking_classification.model')
    plot_path = os.path.join(train_results_directory, 'plot.png')
    train_details_path = os.path.join(train_results_directory, 'details.json')

    # initialize the model
    print("[INFO] compiling model...")
    # model = LeNet.build(width=70, height=70, depth=3, classes=2)
    #model, architecture_name = mAlexNet.build(width=IMAGE_HEIGHT, height=IMAGE_WIDTH, depth=IMAGE_CHANNEL, classes=NUM_CLASSES)
    model, architecture_name = create_googlenet(width=IMAGE_HEIGHT,
                                                height=IMAGE_WIDTH,
                                                depth=IMAGE_CHANNEL,
                                                classes=NUM_CLASSES)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
    model.compile(loss="categorical_crossentropy",  # matches the 2-class one-hot labels
                  optimizer=opt,
                  metrics=["accuracy"])

    image_gen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=90,
                                   horizontal_flip=True,
                                   vertical_flip=True)
    traindf = pd.read_csv(os.path.join(dataset_path, 'data_paths_train.csv'))
    traindf_aug = pd.read_csv(os.path.join(dataset_path, 'data_paths_aug.csv'))
    traindf_extended = pd.concat([traindf, traindf_aug])
    # feed the concatenated frame so the augmented rows are actually used
    train_generator = image_gen.flow_from_dataframe(dataframe=traindf_extended,
                                                    directory=None,
                                                    x_col='path',
                                                    y_col='y',
                                                    shuffle=True,
                                                    has_ext=True,
                                                    class_mode="categorical",
                                                    target_size=(IMAGE_HEIGHT,
                                                                 IMAGE_WIDTH),
                                                    batch_size=BATCH_SIZE)

    for y in traindf.columns:
        print("Column: {} Type: {}".format(y, traindf[y].dtype))

    image_gen2 = ImageDataGenerator(rescale=1. / 255)
    testdf = pd.read_csv(os.path.join(dataset_path, 'data_paths_test.csv'))
    print("The size of training is: {} and test is: {}".format(
        traindf.shape, testdf.shape))
    test_generator = image_gen2.flow_from_dataframe(dataframe=testdf,
                                                    directory=None,
                                                    x_col='path',
                                                    y_col='y',
                                                    shuffle=True,
                                                    has_ext=True,
                                                    class_mode="categorical",
                                                    target_size=(IMAGE_HEIGHT,
                                                                 IMAGE_WIDTH),
                                                    batch_size=BATCH_SIZE)

    # train the network
    print("[INFO] training network...")
    # with open(dataset_path, "rb") as fp:  # Unpickling

    #print(model.summary())

    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    STEP_SIZE_VALID = test_generator.n // test_generator.batch_size
    H = model.fit_generator(generator=train_generator,
                            validation_data=test_generator,
                            steps_per_epoch=STEP_SIZE_TRAIN,
                            validation_steps=STEP_SIZE_VALID,
                            epochs=EPOCHS,
                            verbose=1)
    os.mkdir(train_results_directory)
    # save the model to disk
    print("[INFO] serializing network...")
    model.save(model_path)

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy on Occupied/Empty")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="lower left")
    plt.savefig(plot_path)

    details = {
        'epochs': str(EPOCHS),
        'batch_size': str(BATCH_SIZE),
        'image_size': "{}x{}".format(IMAGE_WIDTH, IMAGE_HEIGHT),
        'architecture': architecture_name
    }
    with open(train_details_path, 'w') as outfile:
        json.dump(details, outfile)
Example No. 5
    def gnet(self, params):
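        # Build a fresh GoogLeNet and compile it with SGD + Nesterov momentum;
        # `params` is accepted but unused in this snippet.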
        model = create_googlenet()
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(optimizer=sgd, loss='categorical_crossentropy')

        return model
Example No. 6
    def _configure_network(self, build=True):

        network = self.config['network']
        type_, weights = network['type'].lower(), network.get('weights', None)
        fine_tuning = " with pre-trained weights '{}'".format(
            weights) if weights else " without pre-training"

        if 'vgg' in type_:

            from keras.applications.vgg16 import VGG16
            logging.info("Instantiating VGG model" + fine_tuning)
            self.model = VGG16(weights=weights,
                               input_shape=(3, 224, 224),  # include_top=True requires 224x224
                               include_top=True)

        elif 'resnet' in type_:

            from keras.applications.resnet50 import ResNet50
            logging.info("Instantiating ResNet model" + fine_tuning)

            input_layer = Input(shape=(3, 224, 224))
            base_model = ResNet50(weights=weights,
                                  include_top=False,
                                  input_tensor=input_layer)

            x = base_model.output
            x = Flatten()(x)
            x = Dense(1024, activation='relu')(x)
            x = Dropout(0.5)(x)
            predictions = Dense(3, activation='softmax')(x)

            self.model = Model(inputs=base_model.input, outputs=predictions)
            # for layer in base_model.layers:
            #     layer.trainable = fine_tuning

        else:

            if 'googlenet' in type_:
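                # PoolHelper and LRN are the custom layers this GoogLeNet port
                # defines; model_from_json needs them to rebuild the graph.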
                custom_objects = {"PoolHelper": PoolHelper, "LRN": LRN}
                mod_str = 'GoogLeNet'
            else:
                custom_objects = {}
                mod_str = 'custom'

            from googlenet import create_googlenet
            logging.info("Instantiating {} model".format(mod_str) +
                         fine_tuning)
            arch = network.get('arch', None)

            if arch is None:
                self.model = create_googlenet(network.get('no_classes', 3),
                                              network.get('no_features', 1024))
            else:
                self.model = model_from_json(open(arch).read(),
                                             custom_objects=custom_objects)

            if weights:
                print "Loading weights '{}'".format(weights)
                self.model.load_weights(weights, by_name=True)

        # Configure optimizer
        if build:
            opt_options = self.config['optimizer']
            name = opt_options['type']
            loss = opt_options['loss']
            params = opt_options['params']
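            # OPTIMIZERS is assumed to be a module-level dict mapping config
            # names to Keras optimizer classes, e.g. {'sgd': SGD, 'adam': Adam}.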
            optimizer = OPTIMIZERS[name](**params)
            self.model.compile(optimizer=optimizer,
                               loss=loss,
                               metrics=['accuracy'])