Example #1
# Imports assumed by this snippet (the original file is not shown in full):
import argparse

import cv2
import imutils
import numpy as np
from tensorflow.compat.v1 import ConfigProto, InteractiveSession
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array

# IMAGE_SIZE (the width/height the network expects) is assumed to be defined
# elsewhere in the module.


class process_network:
    def __init__(self):
        ap = argparse.ArgumentParser()
        ap.add_argument("-m",
                        "--model",
                        required=True,
                        help="path to trained model mode")
        ap.add_argument("-i",
                        "--image",
                        required=True,
                        help="path to input image")
        self.args = vars(ap.parse_args())
        print("[*] Loading network ..")
        self.model = load_model(self.args["model"])
        print("[*] Done")
        self.path_frame = self.args["image"]
        #self.create_session()
        self.frame = cv2.imread(self.path_frame)
        self.load_img(self.frame)
        self.getProba()
        #self.close_session()

    def create_session(self):
        print("[*] Settings config ..")
        config = ConfigProto()
        config.gpu_options.allow_growth = True
        self.session = InteractiveSession(config=config)
        print("[*] Done")

    def close_session(self):
        print("[!] Closing session ..")
        self.session.close()
        del self.session
        print("[*] Done")

    def load_img(self, frame):
        #image = cv2.imread(args["image"])
        self.image = frame
        self.orig = self.image.copy()
        self.image = cv2.resize(self.image, IMAGE_SIZE)
        self.image = self.image.astype("float") / 255.0
        self.image = img_to_array(self.image)
        self.image = np.expand_dims(
            self.image, axis=0)  # to have dims (1, width, height, 3)

    def getProba(self):
        nothugo, hugo = self.model.predict(self.image)[0]
        label = "Hugo" if hugo > nothugo else "Unknown"
        proba = hugo if hugo > nothugo else nothugo
        print("Hugo {} Not Hugo {}".format(str(hugo), str(nothugo)))
        label = "{}: {:.2f}%".format(label, proba * 100)
        output = imutils.resize(self.orig, width=400)
        cv2.putText(output, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)
        cv2.imshow("Output", output)
        cv2.waitKey(0)
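
A minimal way to run this class, assuming the module defines IMAGE_SIZE and the imports noted above, is to instantiate it from an entry point such as this hypothetical one, since __init__ parses the arguments and runs the prediction itself:

# Hypothetical entry point; process_network() loads the model passed via
# -m/--model and classifies the image passed via -i/--image.
if __name__ == "__main__":
    process_network()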
Example #2
File: main.py Project: nut41035/lab
def train_and_eval(DIR,
                   EPOCHS=20,
                   BS=16,
                   IMAGE_COUNT=139,
                   VALIDATION_COUNT=134,
                   learning_rate=0.05,
                   beta=0.5,
                   input_size=(64, 64, 32, 1)):
    # session setting
    """ os.environ['TF_CPP_MIN_LOG_LEVEL']
      0 = all messages are logged (default behavior)
      1 = INFO messages are not printed
      2 = INFO and WARNING messages are not printed
      3 = INFO, WARNING, and ERROR messages are not printed
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    gpu_options = GPUOptions(per_process_gpu_memory_fraction=1.0)
    config = ConfigProto(gpu_options=gpu_options)
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    print("Number of GPUs Available: ",
          len(tf.config.experimental.list_physical_devices('GPU')))

    ## program parameter
    BASE_DIR = DIR
    TRAIN_DIR_PATH = BASE_DIR + 'train/'
    VALIDATION_DIR_PATH = BASE_DIR + 'validation/'
    seed = 1
    time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    # create logs
    log_dir = "logs/fit/" + time_stamp
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=0)
    file_writer = tf.summary.create_file_writer(log_dir)

    ## training parameter
    loss_func = tversky_loss(beta)
    steps_per_epoch = 70
    input_size = input_size

    ## construct training and validation set
    training_data = PPDataGenerator(TRAIN_DIR_PATH,
                                    batch_size=BS,
                                    image_size=64)
    validating_data = PPDataGenerator(VALIDATION_DIR_PATH,
                                      batch_size=BS,
                                      image_size=64)

    ## load model
    model = unet_norm(input_size=input_size,
                      loss_func=loss_func,
                      l_rate=learning_rate)
    print('#### Model loaded')

    ## training begin
    model.fit_generator(training_data,
                        steps_per_epoch=steps_per_epoch,
                        epochs=EPOCHS,
                        validation_data=validating_data,
                        callbacks=[tensorboard_callback])

    if not os.path.exists('./model/'):
        os.makedirs('./model/')
    model.save("model/UNet_%s.h5" % time_stamp)
    print("model saved at   model/UNet_%s.h5" % time_stamp)

    text = 'UNet_%s.h5\n\
            loss: weighted_dice  %s\n\
            learning rate: %s\n\
            image size: %s\n'\
            % (time_stamp, beta, learning_rate, input_size)
    with open("./log.txt", "a") as myfile:
        myfile.write(text)

    ## prediction begin
    TP_sum, FP_sum, FN_sum, TP_P_sum, FP_P_sum, FN_P_sum = predict_folder(
        model,
        '%stest/' % BASE_DIR,
        save_mode=4,
        save_dir='./result/%s' % (time_stamp))
    eval_precision = divide(TP_sum, TP_sum + FP_sum)
    eval_recall = divide(TP_sum, TP_sum + FN_sum)
    eval_precision_P = divide(TP_P_sum, TP_P_sum + FP_P_sum)
    eval_recall_P = divide(TP_P_sum, TP_P_sum + FN_P_sum)
    text = 'Evaluation result:  %s\n\
            TP : %s\n\
            FP : %s\n\
            FN : %s\n\
            Recall by pixel: %s\n\
            Precision by pixel: %s\n\
            Recall by area: %s\n\
            Precision by area: %s\n\n\n'\
            %(time_stamp, TP_sum, FP_sum, FN_sum, eval_recall_P, eval_precision_P, eval_recall, eval_precision)
    with open("./log.txt", "a") as myfile:
        myfile.write(text)

    file_writer.close()
    session.close()
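
For reference, a call such as the following would train on a dataset laid out as DIR/train/, DIR/validation/ and DIR/test/ (the layout implied by the paths built above); the directory and hyperparameter values are only illustrative, and tversky_loss, PPDataGenerator, unet_norm and predict_folder are helpers from the surrounding project:

# Illustrative invocation with placeholder values.
train_and_eval('./data/',
               EPOCHS=20,
               BS=16,
               learning_rate=0.05,
               beta=0.5,
               input_size=(64, 64, 32, 1))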
Example #3
# Compile model
model.compile(loss='categorical_crossentropy',
              optimizer=Optim,
              metrics=['accuracy'])
model.load_weights(
    os.path.join(os.path.dirname(InitialModel),
                 'W_' + os.path.basename(InitialModel)))
model.summary()
'''Tune and Fit CNN'''
Labels1Hot = to_categorical(Label)

(trainX, testX, trainY, testY) = train_test_split(Tensor,
                                                  Labels1Hot,
                                                  test_size=0.2)
when2stop = MyThresholdCallback(threshold=targetacc)
history = model.fit(trainX,
                    trainY,
                    epochs=120,
                    batch_size=batches,
                    validation_data=(testX, testY),
                    callbacks=[when2stop])
Predictions = np.argmax(model.predict(testX), axis=1)
Observations = np.argmax(testY, axis=1)
report = metrics.classification_report(Observations, Predictions, digits=3)
print('Classification report for ' + TuningDataName)
print(report)
model.save(OutPath + TuningDataName + '.h5')

#close the TF session
session.close()
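
MyThresholdCallback is not included in this snippet; a minimal sketch of such an early-stopping callback, assuming it stops training once validation accuracy reaches the given threshold, could look like this:

import tensorflow as tf


class MyThresholdCallback(tf.keras.callbacks.Callback):
    """Sketch: stop training once validation accuracy reaches `threshold`."""

    def __init__(self, threshold):
        super().__init__()
        self.threshold = threshold

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # The key is 'val_accuracy' in recent tf.keras, 'val_acc' in older versions.
        val_acc = logs.get("val_accuracy", logs.get("val_acc", 0.0))
        if val_acc >= self.threshold:
            self.model.stop_training = True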
Example #4
File: leeson06.py Project: honwaii/nlp
# @Author  : honwaii
# @Email   : [email protected]
# @File    : leeson06.py

from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

# tf.executing_eagerly()
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

session.close()


def basic_operation():
    x = [[2.]]
    m = tf.matmul(x, x)
    print("x matmul x = {}".format(m))
    a = tf.constant([[1, 2], [3, 4]])
    print(a)
    b = tf.add(a, 1)
    print(b)
    # element-wise multiplication
    print(a * b)
    print(tf.matmul(a, b))
    c = np.multiply(a, b)
    print(c)
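
The ConfigProto / InteractiveSession block above is the common way to make TensorFlow grow GPU memory on demand rather than reserving it all up front. A minimal, illustrative call of the function follows; tf.executing_eagerly() reports whether the prints will show concrete values:

if __name__ == "__main__":
    # With eager execution active (the TF 2.x default) the tensors above print
    # their values; otherwise they appear as symbolic Tensor objects.
    print("Eager execution:", tf.executing_eagerly())
    basic_operation()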
Example #5
                       reconstructed[i],
                       cmap=plt.cm.gray)
    print('\nreconstruction data saved to : \n', pred_res_path)

# In[]:
# illustrate the results
start = 0
end = len(reconstructed) - 1
idx = np.linspace(start, end,
                  10).astype('int32')  # show 10 results at equal intervals

in_images = in_imgs[idx]
recon_images = reconstructed[idx]

fig, axes = plt.subplots(nrows=2,
                         ncols=10,
                         sharex=True,
                         sharey=True,
                         figsize=(20, 4))
for images, row in zip([in_images, recon_images], axes):
    for img, ax in zip(images, row):
        ax.imshow(img.reshape(pic_size), cmap='gray')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
plt.show()

# In[24]:
# release
sess.close()
Example #6
def main(image_dir="./", net_loc="../cnn_mnist_10c.h5"):
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    K.set_session(session)

    print(image_dir)
    print(net_loc)
    # from ptpdb import set_trace

    # set_trace()
    # imcollection = np.array(imread_collection(image_dir))[:, :, :, 0]
    imcollection = np.array(imread_collection(f"{image_dir}/*.png"))

    net_generated_data = np.expand_dims(imcollection, 3)

    x_real_train, x_real_test = keras_extract_mnist_digits()
    num_samples = min(len(net_generated_data), len(x_real_test))

    x_real_train = x_real_train / 255
    x_real_test = x_real_test / 255
    net_generated_data = net_generated_data / 255

    np.random.shuffle(x_real_train)
    np.random.shuffle(x_real_test)
    np.random.shuffle(net_generated_data)

    x_real_train = x_real_train[:num_samples]
    x_real_test = x_real_test[:num_samples]

    full_classifier = keras.models.load_model(net_loc)
    req_layer = "flatten_1"
    classifier = Model(
        inputs=full_classifier.input,
        outputs=full_classifier.get_layer(req_layer).output,
    )

    print("Calculating FCD for train data")
    fcd_train = compute_real_fcd(x_real_train, classifier)
    print("Calculating FCD for test data")
    fcd_test = compute_real_fcd(x_real_test, classifier)

    print(
        f"samples = {num_samples} train fcd = {fcd_train:.3g} test fcd = {fcd_test:.3g}"
    )

    net_real_data = x_real_train

    assert len(net_generated_data) == len(net_real_data)
    print(
        np.max(net_generated_data),
        np.min(net_generated_data),
        f"{np.std(net_generated_data):.3f}",
        f"{np.mean(net_generated_data):.3f}",
    )
    print(
        np.max(net_real_data),
        np.min(net_real_data),
        f"{np.std(net_real_data):.3f}",
        f"{np.mean(net_real_data):.3f}",
    )
    real_act = classifier.predict(net_real_data)
    print(real_act.shape)
    gen_act = classifier.predict(net_generated_data)

    print("Calculating FCD for generated data")
    fcd_tensor = diagonal_only_frechet_classifier_distance_from_activations(
        tf.convert_to_tensor(real_act), tf.convert_to_tensor(gen_act))

    fcd = session.run(fcd_tensor)
    print(f"fcd = {fcd:.3g}")
    session.close()
    sys.exit(0)
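    # NOTE: sys.exit(0) above ends main() here, so the multi-iteration FCD loop
    # below is never reached as written.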

    fcd_iters = 2

    gen_fcd_arr = []
    for fcd_i in range(fcd_iters):

        # inverse normalization due to tanh
        # net_generated_data = (net_generated_data + 1) / 2

        net_real_data = x_real_train

        assert len(net_generated_data) == len(net_real_data)
        print(
            np.max(net_generated_data),
            np.min(net_generated_data),
            f"{np.std(net_generated_data):.3f}",
            f"{np.mean(net_generated_data):.3f}",
        )
        print(
            np.max(net_real_data),
            np.min(net_real_data),
            f"{np.std(net_real_data):.3f}",
            f"{np.mean(net_real_data):.3f}",
        )

        np.random.shuffle(net_generated_data)
        np.random.shuffle(net_real_data)

        real_act = classifier.predict(net_real_data)
        gen_act = classifier.predict(net_generated_data)

        print("Calculating FCD for generated data")
        fcd_tensor = diagonal_only_frechet_classifier_distance_from_activations(
            tf.convert_to_tensor(real_act), tf.convert_to_tensor(gen_act))

        sess = K.get_session()

        fcd = sess.run(fcd_tensor)
        gen_fcd_arr.append(fcd)
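
keras_extract_mnist_digits, compute_real_fcd and diagonal_only_frechet_classifier_distance_from_activations are project helpers that are not included in the snippet. A rough sketch of the first one, assuming it simply returns the MNIST train and test images with a trailing channel axis, might be:

import numpy as np
from tensorflow import keras


def keras_extract_mnist_digits():
    """Sketch: return MNIST train/test images with a trailing channel axis."""
    (x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
    return np.expand_dims(x_train, 3), np.expand_dims(x_test, 3)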
Example #7
class Application:
    def __init__(self):
        ap = argparse.ArgumentParser()
        ap.add_argument("-d",
                        "--dataset",
                        required=True,
                        type=str,
                        help="path to input dataset")
        ap.add_argument("-p",
                        "--plot",
                        type=str,
                        default="plot.png",
                        help="path to output accuracy/loss plot")
        ap.add_argument("-n",
                        "--number_images",
                        type=int,
                        default=5000,
                        help="number of images to load")
        ap.add_argument("-e",
                        "--epochs",
                        type=int,
                        default=25,
                        help="number of epochs")
        ap.add_argument("-b",
                        "--batch_size",
                        type=int,
                        default=16,
                        help="batch size")
        ap.add_argument("-lr",
                        "--learning_rate",
                        type=float,
                        default=1e-3,
                        help="learning rate")
        ap.add_argument(
            "-m",
            "--model",
            type=str,
            default="RESNET50",
            help=
            "model to choose `LENET` or `INCEPTIONV3` or `RESNET50` or `VGG16`"
        )
        ap.add_argument("-g",
                        "--gpu",
                        type=str,
                        default="yes",
                        help="Use the gpu `yes` or `no`")
        self.args = vars(ap.parse_args())
        self.model = Model(self.args)

    def run(self):
        self.model.run()
        self.model.save_plot()

    def create_session(self):
        print("[*] Settings config ..")
        if self.args["gpu"] == "no":
            os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        if tf.test.gpu_device_name():
            print('GPU found')
        else:
            print("No GPU found")
        config = ConfigProto()
        self.session = InteractiveSession(config=config)
        print("[*] Done")
        #config.gpu_options.allow_growth = True
        #config.gpu_options.allocator_type = "BFC"
        #config.gpu_options.per_process_gpu_memory_fraction = 0.90

    def close_session(self):
        print("[!] Closing session ..")
        self.session.close()
        del self.session
        print("[*] Done")
Example #8
def train_and_eval(params):
    # session setting
    """ os.environ['TF_CPP_MIN_LOG_LEVEL']
      0 = all messages are logged (default behavior)
      1 = INFO messages are not printed
      2 = INFO and WARNING messages are not printed
      3 = INFO, WARNING, and ERROR messages are not printed
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    gpu_options = GPUOptions(per_process_gpu_memory_fraction=1.0)
    config = ConfigProto(gpu_options=gpu_options)
    config.gpu_options.allow_growth = True
    session = InteractiveSession(config=config)
    print("Number of GPUs Available: ",
          len(tf.config.experimental.list_physical_devices('GPU')))

    ## program parameter
    BASE_DIR = params[0]
    TRAIN_DIR_PATH = BASE_DIR + 'train/'
    VALIDATION_DIR_PATH = BASE_DIR + 'validation/'
    seed = params[1]
    time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = params[2] + time_stamp
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=0)
    file_writer = tf.summary.create_file_writer(log_dir)

    ## training parameter
    loss_func = params[3]
    input_size = params[4]
    steps_per_epoch = params[5]
    EPOCHS = params[6]
    BS = params[7]
    IMAGE_COUNT = params[8]
    VALIDATION_COUNT = params[9]
    learning_rate = params[10]

    ## construct training and validation set
    training_data = DataGenerator(TRAIN_DIR_PATH,
                                  batch_size=BS,
                                  image_size=input_size[0])
    validating_data = DataGenerator(VALIDATION_DIR_PATH,
                                    batch_size=BS,
                                    image_size=input_size[0])

    ## load model
    model = unet(input_size=input_size,
                 loss_func=loss_func,
                 l_rate=learning_rate)
    model.summary()
    print('#### Model loaded')

    ## training begin
    model.fit_generator(training_data,
                        steps_per_epoch=steps_per_epoch,
                        epochs=EPOCHS,
                        validation_data=validating_data,
                        callbacks=[tensorboard_callback])

    if not os.path.exists('./model/'):
        os.makedirs('./model/')
    model.save("model/UNet_%s.h5" % time_stamp)
    print("model saved at   model/UNet_%s.h5" % time_stamp)

    text = 'UNet_%s.h5\n\
            Learning rate: %s\n\
            Image size: %s\n\
            Epoch: %s\n\
            Batch size: %s\n\
            Steps per epoch: %s\n'\
            % (time_stamp, learning_rate, input_size, EPOCHS, BS, steps_per_epoch)
    with open("./log.txt", "a") as myfile:
        myfile.write(text)
    file_writer.close()
    session.close()

    ## prediction begin
    predict_folder(model,
                   '%stest/' % BASE_DIR,
                   save_dir='./result/%s' % (time_stamp))
    session.close()
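
Because this variant reads everything positionally from params, a caller has to supply the values in exactly the order unpacked above. A sketch with placeholder values (the loss function reuses tversky_loss from Example #2):

# Placeholder values; the order must match the indices read inside train_and_eval.
params = [
    './data/',          # 0: base data directory (contains train/, validation/, test/)
    1,                  # 1: seed
    'logs/fit/',        # 2: log directory prefix
    tversky_loss(0.5),  # 3: loss function
    (64, 64, 1),        # 4: input size (whatever the project's unet expects)
    70,                 # 5: steps per epoch
    20,                 # 6: epochs
    16,                 # 7: batch size
    139,                # 8: image count
    134,                # 9: validation count
    1e-3,               # 10: learning rate
]
train_and_eval(params)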