Example #1
def makerModel():
    hist_input = Input(shape=(256,), name="hist_input")
    x1 = Dense(256, activation="sigmoid")(hist_input)
    x1 = Dense(128, activation="relu")(x1)
    x1 = Dense(12, activation="relu")(x1)

    x2 = Dense(256, activation="sigmoid")(hist_input)
    x2 = Dense(128, activation="relu")(x2)
    x2 = Dense(12, activation="relu")(x2)

    x3 = Dense(256, activation="sigmoid")(hist_input)
    x3 = Dense(128, activation="relu")(x3)
    x3 = Dense(12, activation="relu")(x3)

    x = concatenate([x1, x2, x3])

    hand_valid = Dense(1, name="hand_valid", activation="sigmoid")(x)

    model = Model(inputs=[hist_input], outputs=[hand_valid])

    # Compile model
    model.compile(loss="binary_crossentropy", metrics=["accuracy"], optimizer=OPT)

    if __name__ == "__main__":
        Console.info("Model summary")
        model.summary()

    # Load weights if a path was given
    load_weights = args["weights"]
    if load_weights is not None:
        Console.info("Loading weights from", load_weights)
        model.load_weights(load_weights)

    return model
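makerModel() leans on module-level globals that the snippet does not show: OPT (the optimizer), args (parsed CLI options), and the Keras imports. A minimal sketch of that assumed context, with illustrative values only:

# Hypothetical context for makerModel(); names match the snippet, values are guesses.
from keras.layers import Input, Dense, concatenate
from keras.models import Model
from keras.optimizers import Adam

OPT = Adam(lr=0.001)       # optimizer handed to model.compile
args = {"weights": None}   # parsed CLI arguments; None skips weight loading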
Example #2
def trainModel(model, X_train, y_train):
    Console.info("Create validation sets, training set, testing set...")
    # Split images dataset
    k = len(X_train) // 6  # size of each split: one sixth of the data

    hist_test = X_train[:k]
    hand_test = y_train[:k]

    hist_valid = X_train[k: 2 * k]
    hand_valid = y_train[k: 2 * k]

    hist_train = X_train[2 * k:]
    hand_train = y_train[2 * k:]

    Console.info("Training network...")
    history = model.fit(
        hist_train,
        hand_train,
        batch_size=BATCH_SIZE,
        epochs=EPOCHS,
        verbose=2,
        validation_data=(hist_valid, hand_valid),
        callbacks=loadCallBack(),
    )

    Console.info("Save model to disck...")
    # Path to save model
    PATHE_SAVE_MODEL = os.path.join(__location__, "model")

    # Save weights after every epoch
    if not os.path.exists(PATHE_SAVE_MODEL):
        os.makedirs(PATHE_SAVE_MODEL)

    # serialize model to YAML
    model_yaml = model.to_yaml()
    with open(
        os.path.join(PATHE_SAVE_MODEL, "model_hands_not_hands.yaml"), "w"
    ) as yaml_file:
        yaml_file.write(model_yaml)
    # serialize weights to HDF5
    model.save_weights(os.path.join(PATHE_SAVE_MODEL, "model_hands_not_hands.h5"))
    # save image of build model
    # save image of build model
    plot_model(
        model,
        to_file=os.path.join(PATHE_SAVE_MODEL, "model_hands_not_hands.png"),
        show_shapes=True
    )
    print("OK")

    # evaluate the network
    Console.info("Evaluating network...")
    score = model.evaluate([hist_test], [hand_test], batch_size=BATCH_SIZE, verbose=1)

    Console.log("Test loss:", score[0])
    Console.log("Test Acc:", score[1])

    # list all data in history
    Console.info("Save model history graphics...")
    print(history.history.keys())
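The split at the top of trainModel() reserves the first sixth of the data for testing, the next sixth for validation, and the remaining two thirds for training. A quick sanity check of those proportions on a dummy 600-element list:

# Verifying the 1/6 test, 1/6 validation, 4/6 train split.
X = list(range(600))
k = len(X) // 6
test, valid, train = X[:k], X[k:2 * k], X[2 * k:]
print(len(test), len(valid), len(train))  # 100 100 400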
Example #3
def saveDataSet(X_train, y_train):
    Console.info("Save dataset")
    file_path = os.path.join(__location__, "dataset_hands",
                             "histogram-hand-dataset.hdf5")
    with h5py.File(file_path, "w") as f:
        f.create_dataset("hist", data=X_train)
        f.create_dataset("valid", data=y_train)
Example #4
def saveDataSet(X_img, y_img):
    Console.info("Save dataset")
    X_img = np.asarray(X_img, dtype=np.float16)
    y_img = np.asarray(y_img, dtype=np.float16)
    # Split images dataset
    k = len(X_img) // 6  # one sixth per split
    writeFile("testing", X_img[:k, :, :, :], y_img[:k, :, :, :])
    writeFile("validation", X_img[k:2 * k, :, :, :], y_img[k:2 * k, :, :, :])
    writeFile("training", X_img[2 * k:, :, :, :], y_img[2 * k:, :, :, :])
Example #5
def getFiles():
    Console.info("Get imges form", TRAIN_DIR)
    # file names on train_dir
    files = os.listdir(train_dir)
    # filter image files
    files = [f for f in files if fnmatch.fnmatch(f, "*.png")]
    # Shuffle randomly
    np.random.shuffle(files)
    return files[:CUT_DATASET]
Example #6
def openDataSet(dataset):
    Console.info("Opening dataset...")
    file_name = "img-for-autoencoder-" + dataset + ".hdf5"
    path_to_save = os.path.join(__location__, "dataset", file_name)
    with h5py.File(path_to_save, "r") as f:
        X_img = f["x_img"][()]
        y_img = f["y_img"][()]

    return X_img, y_img
Example #7
def loadModel(dir_model_backup):
    Console.info("Get model and weights for", dir_model_backup)
    # load YAML and create model
    with open(os.path.join(__location__, "model", dir_model_backup + ".yaml"), "r") as yaml_file:
        loaded_model_yaml = yaml_file.read()
    model = model_from_yaml(loaded_model_yaml)
    # load weights into new model
    model.load_weights(os.path.join(__location__, "model", dir_model_backup + ".h5"))
    Console.log("Loaded model from disk")
    return model
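Note that Model.to_yaml() and model_from_yaml were removed from Keras as of TensorFlow 2.6; if this code is ported to a newer stack, the JSON round-trip is the closest replacement. A sketch under that assumption (file names are illustrative):

from tensorflow.keras.models import model_from_json

# Same pattern as loadModel(), using the JSON serialization API instead of YAML.
with open("model_hands_not_hands.json", "r") as json_file:
    model = model_from_json(json_file.read())
model.load_weights("model_hands_not_hands.h5")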
Example #8
def processeImg(files, y_lower_upper):
    total_file = len(files)
    Console.log("Process", total_file, "images")
    x_files = []
    for i in range(total_file):
        min_color = int(y_lower_upper[0][i])
        max_color = int(y_lower_upper[1][i])
        min_color = max(min_color, 0)
        max_color = min(max_color, 255)
        x_files.append((files[i], min_color, max_color))

    # Used when the work can be split across multiple cores
    output = multiprocessing.Queue()
    num_processes = multiprocessing.cpu_count()
    if platform.system() == "Linux" and num_processes > 1:
        processes = []

        lot_size = total_file // num_processes

        for x in range(1, num_processes + 1):
            if x < num_processes:
                lote = x_files[(x - 1) * lot_size: ((x - 1) * lot_size) + lot_size]
            else:
                lote = x_files[(x - 1) * lot_size:]
            processes.append(Process(target=mpProcessImg, args=(lote, output)))

        if len(processes) > 0:
            Console.info("Fix colors of the images...")
            for p in processes:
                p.start()

            result = []
            for x in range(num_processes):
                result.append(output.get(True))

            for p in processes:
                p.join()

            X_values = []
            for x in result:
                X_values = X_values + x
            updateProgress(1, total_file, total_file, "")
            Console.log("Image processed:", len(X_values))

    else:
        Console.info("We can not divide the load into different processors")
        # X_values = mpGetHistogramFormFiles(files)
        exit(0)

    return X_values
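The worker loop gives each of the first num_processes - 1 processes exactly lot_size files and hands the remainder to the last one. The same partitioning in isolation, assuming 10 items across 3 workers:

# Reproducing the chunking used by processeImg().
items = list(range(10))
num_processes = 3
lot_size = len(items) // num_processes  # 3
lots = []
for x in range(1, num_processes + 1):
    if x < num_processes:
        lots.append(items[(x - 1) * lot_size:(x - 1) * lot_size + lot_size])
    else:
        lots.append(items[(x - 1) * lot_size:])  # last worker takes the remainder
print(lots)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]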
Example #9
def getFiles(path_input):
    Console.info("Reading on", path_input)
    path = os.path.join(__location__)
    for p in path_input:
        path = os.path.join(path, p)

    rta = []
    # file names in the target directory
    files = os.listdir(path)
    # filter image files
    files = [f for f in files if fnmatch.fnmatch(f, "*.png")]

    for file_name in files:
        # Cap the list at CUT_DATASET files (a value <= 0 disables the cap)
        if CUT_DATASET <= 0 or len(rta) < CUT_DATASET:
            rta.append(file_name)

    return rta
Example #10
def getFiles():
    Console.info("Reading img...")
    rta = []
    # defined path
    path = os.path.join(__location__, "dataset")
    path_original = os.path.join(path, "original")
    path_hands = os.path.join(path, "hands")
    # file names in each directory
    files_original = os.listdir(path_original)
    files_hand = os.listdir(path_hands)
    # filter image files
    for x_img in files_original:
        for y_img in files_hand:
            if (fnmatch.fnmatch(x_img, "*.png")
                    and fnmatch.fnmatch(y_img, "*.png") and x_img == y_img):
                rta.append(x_img)

    return rta
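The nested loop above compares every original file against every hand file, which is quadratic in the number of images; a set intersection gives the same pairing in roughly linear time. A sketch with sample data, keeping the *.png filter:

import fnmatch

files_original = ["a.png", "b.png", "notes.txt"]  # sample data for illustration
files_hand = ["b.png", "c.png"]

# Same matching as the nested loop, in one pass per list.
pngs_original = {f for f in files_original if fnmatch.fnmatch(f, "*.png")}
pngs_hand = {f for f in files_hand if fnmatch.fnmatch(f, "*.png")}
rta = sorted(pngs_original & pngs_hand)
print(rta)  # ['b.png']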
Example #11
def getHistogramFormFiles(files=None):
    # avoid the mutable default argument pitfall
    files = files if files is not None else []
    total_file = len(files)
    Console.log("Process", total_file, "images")

    # Used when the work can be split across multiple cores
    output = multiprocessing.Queue()
    num_processes = multiprocessing.cpu_count()
    if platform.system() == "Linux" and num_processes > 1:
        processes = []

        lot_size = total_file // num_processes

        for x in range(1, num_processes + 1):
            if x < num_processes:
                lote = files[(x - 1) * lot_size: ((x - 1) * lot_size) + lot_size]
            else:
                lote = files[(x - 1) * lot_size:]
            processes.append(Process(target=mpGetHistogramFormFiles, args=(lote, output)))

        if len(processes) > 0:
            Console.info("Get histogram of the images...")
            for p in processes:
                p.start()

            result = []
            for x in range(num_processes):
                result.append(output.get(True))

            for p in processes:
                p.join()

            X_values = []
            for x in result:
                X_values = X_values + x
            updateProgress(1, total_file, total_file, "")
            Console.log("Image processed:", len(X_values))

    else:
        Console.info("We can not divide the load into different processors")
        # X_values = mpGetHistogramFormFiles(files)
        exit(0)

    return X_values
Example #12
def makeHandsHuman():
    files = getFiles()
    total_file = len(files)
    Console.info("Image total:", total_file)

    num_processes = multiprocessing.cpu_count()
    # results queue for the workers (assumed missing from the original snippet)
    output = multiprocessing.Queue()
    if platform.system() == "Linux" and num_processes > 1:
        processes = []

        lot_size = total_file // num_processes

        for x in range(1, num_processes + 1):
            if x < num_processes:
                lot_img = files[(x - 1) * lot_size:((x - 1) * lot_size) +
                                lot_size]
            else:
                lot_img = files[(x - 1) * lot_size:]
            processes.append(Process(target=mpStart, args=(lot_img, output)))

        if len(processes) > 0:
            Console.info("Get histogram of the images...")
            for p in processes:
                p.start()

            result = []
            for x in range(num_processes):
                result.append(output.get(True))

            for p in processes:
                p.join()

            updateProgress(1, total_file, total_file, "")
    else:
        Console.info("No podemos dividir la cargan en distintos procesadores")
        exit(0)
Example #13
def loadCallBack():
    cb = []

    if args["stepDecay"] == "True":
        def stepDecay(epoch):
            # base learning rate, drop factor, and drop interval in epochs
            initAlpha = 0.01
            # factor = 0.25
            factor = 0.5
            dropEvery = 5
            # compute learning rate for the current epoch
            alpha = initAlpha * (factor ** np.floor((1 + epoch) / dropEvery))
            # return the learning rate
            return float(alpha)
        cb.append(LearningRateScheduler(stepDecay))

    # TensorBoard
    # how to use: $ tensorboard --logdir path_to_current_dir/Graph
    if args["tensorBoard"] == "True":
        # Save log for tensorboard
        LOG_DIR_TENSORBOARD = os.path.join(__location__, "..", "tensorboard")
        if not os.path.exists(LOG_DIR_TENSORBOARD):
            os.makedirs(LOG_DIR_TENSORBOARD)

        tbCallBack = keras.callbacks.TensorBoard(
            log_dir=LOG_DIR_TENSORBOARD,
            batch_size=BATCH_SIZE,
            histogram_freq=0,
            write_graph=True,
            write_images=True,
        )

        Console.info("tensorboard --logdir", LOG_DIR_TENSORBOARD)
        cb.append(tbCallBack)

    if args["checkpoint"] == "True":
        # Save weights after every epoch
        if not os.path.exists(os.path.join(__location__, "weights")):
            os.makedirs(os.path.join(__location__, "weights"))
        checkpoint = keras.callbacks.ModelCheckpoint(
            filepath="weights/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
            monitor='val_loss',
            verbose=1,
            save_weights_only=True,
            period=1
        )
        Console.info("Save weights after every epoch")
        cb.append(checkpoint)

    # Reduce learning rate
    if args["reduce_learning"] == "True":
        reduceLROnPlat = keras.callbacks.ReduceLROnPlateau(
            monitor="val_loss", factor=0.8, patience=3, verbose=1, min_lr=0.0001
        )
        Console.info("Add Reduce learning rate")
        cb.append(reduceLROnPlat)

    return cb
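With initAlpha = 0.01, factor = 0.5, and dropEvery = 5, the stepDecay callback halves the learning rate every five epochs. Tracing the same formula over a few epochs:

import numpy as np

initAlpha, factor, dropEvery = 0.01, 0.5, 5
for epoch in (0, 4, 8, 12):
    alpha = initAlpha * (factor ** np.floor((1 + epoch) / dropEvery))
    print(epoch, float(alpha))
# 0 0.01, 4 0.005, 8 0.005, 12 0.0025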
Example #14
def progressFiles(path_input, files, hands_valid):
    path = os.path.join(__location__)
    for p in path_input:
        path = os.path.join(path, p)

    total_file = len(files)
    Console.info("Image total:", total_file)

    num_processes = multiprocessing.cpu_count()
    # results queue for the workers (assumed missing from the original snippet)
    output = multiprocessing.Queue()
    if platform.system() == "Linux" and num_processes > 1:
        processes = []

        lot_size = total_file // num_processes

        for x in range(1, num_processes + 1):
            if x < num_processes:
                lot_img = files[(x - 1) * lot_size:((x - 1) * lot_size) +
                                lot_size]
            else:
                lot_img = files[(x - 1) * lot_size:]
            processes.append(
                Process(target=mpStart,
                        args=(path, lot_img, hands_valid, output, x)))

        if len(processes) > 0:
            Console.info("Get histogram of the images...")
            for p in processes:
                p.start()

            result = []
            for x in range(num_processes):
                result.append(output.get(True))

            for p in processes:
                p.join()

            X_train = []
            y_train = []
            for mp_X_train, mp_y_train in result:
                X_train = X_train + mp_X_train
                y_train = y_train + mp_y_train
            updateProgress(1, total_file, total_file, "")

            return X_train, y_train
    else:
        Console.info("No podemos dividir la cargan en distintos procesadores")
        exit(0)
Example #15
# Child processes load the same code.
# This guard makes sure the code below only runs when the module
# is executed as the root process.
if __name__ == "__main__":
    if args["predict"] == None or args["predict"] == "True":
        (X_train, y_train) = openDataSet()
        Console.log("Dataset file count", len(y_train))

    # Create model
    model = makerModel()

    if args["train"] == "True":
        trainModel(model, X_train, y_train)

    if args["evaluate"] == "True":
        Console.info("Evaluating model...")
        score = model.evaluate([X_train], [y_train], batch_size=BATCH_SIZE, verbose=1)
        Console.log("Test loss:", score[0])
        Console.log("Test Acc:", score[1])

    if args["predict"] != None and args["predict"] != "False":
        if args["predict"] != "True":
            Console.info("Predict for", args["predict"])
        else:
            Console.info("Predict...")
            # new instances where we do not know the answer
            Xnew = np.array(X_train)

            # make a prediction
            ynew = model.predict(Xnew)
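Since hand_valid is a single sigmoid unit, model.predict returns probabilities in [0, 1]; turning them into hand/not-hand labels is one threshold away. A minimal follow-on sketch at the conventional 0.5 cutoff:

# ynew has shape (n_samples, 1); flatten and threshold to get class labels.
labels = (ynew > 0.5).astype(int).ravel()  # 1 = hand, 0 = not hand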
Example #16

if __name__ == "__main__":
    for folder in ["hand", "not_hand"]:
        if not os.path.exists(os.path.join(__location__, "deep_fight", folder)):
            os.makedirs(os.path.join(__location__, "deep_fight", folder))

    model_get_hand = loadModel("model_histogram")
    model_valid_hand = loadModel("model_hands_not_hands")

    # Read img files
    files = getFiles()
    # Get hist for hand
    X_hist_hands = getHistogramFormFiles(files)

    Console.info("Run model_get_hand")
    files = []
    X_to_predict = []
    for img_file, hist in X_hist_hands:
        files.append(img_file)
        X_to_predict.append(hist)
    X_to_predict = np.array(X_to_predict)
    # make a prediction
    y_lower_upper = model_get_hand.predict(X_to_predict)

    Console.info("Fix image colors")
    X_img = processeImg(files, y_lower_upper)

    Console.info("Run model_valid_hand")
    files = []
    X_to_predict = []