# note: cv2, img_to_array (Keras), the project's cfg module and the
# calib_image_list / calib_image_dir / calib_batch_size settings are
# assumed to be imported / defined at module level
def calib_input(iter):
  images = []
  with open(calib_image_list) as f:
      line = f.readlines()
  for index in range(0, calib_batch_size):
      curline = line[iter * calib_batch_size + index]
      calib_image_name = curline.strip()

      # read image with OpenCV, returns a (32, 32, 3) numpy array in BGR order
      image = cv2.imread(calib_image_dir + calib_image_name)
      ##swap channels: from BGR to RGB
      #B, G, R = cv2.split(image)
      #image = cv2.merge([R, G, B])

      img_array = img_to_array(image, data_format=None)

      # scale the pixel values to range 0 to 1.0
      image2 = cfg.Normalize(img_array)

      # reshape numpy array to be (32,32,3)
      image2 = image2.reshape((image2.shape[0], image2.shape[1], 3))
      images.append(image2)

  return {"conv2d_1_input": images}
Example #2
def calib_input(iter):
    # `line` (the list of calibration image file names), tot_num_images,
    # calib_image_dir and calib_batch_size are assumed to be defined at module level
    assert (int(iter) <= int(tot_num_images / calib_batch_size)
            ), "iteration index must not exceed tot_num_images / calib_batch_size"
    images = []
    for index in range(0, calib_batch_size):
        curline = line[(iter - 1) * calib_batch_size + index]  # iter is treated as 1-based here
        calib_image_name = curline.strip()

        # read image with OpenCV, returns a (32, 32, 3) numpy array in BGR order
        filename = os.path.join(calib_image_dir, calib_image_name)
        image = cv2.imread(filename)
        ##swap channels: from BGR to RGB
        #B, G, R = cv2.split(image)
        #image = cv2.merge([R, G, B])

        #img_array = img_to_array(image, data_format=None)

        # scale the pixel values to range 0 to 1.0
        image2 = cfg.Normalize(image)

        # reshape numpy array to be (32,32,3)
        image2 = image2.reshape((image2.shape[0], image2.shape[1], 3))
        images.append(image2)

    return {"conv2d_1_input": images}
Example #3
y_valid = to_categorical(y_valid, 10)

# check settings #DB
assert len(x_train) > cfg.NUM_TRAIN_IMAGES
assert len(x_test) >= (cfg.NUM_TRAIN_IMAGES + cfg.NUM_VAL_IMAGES)
assert cfg.NUM_TRAIN_IMAGES == cfg.NUM_VAL_IMAGES

#################################################################################
# pre-process the data

x_test = np.asarray(x_test)
x_train = np.asarray(x_train)
x_valid = np.asarray(x_valid)

#Normalize and convert from BGR to RGB
x_train = cfg.Normalize(x_train)
x_test = cfg.Normalize(x_test)
x_valid = cfg.Normalize(x_valid)

##################################################################################################
# construct the callback to save only the *best* model to disk
# based on the validation loss
fname = os.path.join(weights, "best_chkpt.hdf5")
checkpoint = ModelCheckpoint(
    fname,
    monitor="val_loss",
    mode="min",
    #monitor="val_acc", mode="max",
    save_best_only=True,
    verbose=1)
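
The checkpoint callback only takes effect once it is passed to training. A typical call is sketched below; the model object, y_train, batch size and epoch count are assumptions, only the callbacks wiring corresponds to the checkpoint defined above.

# hedged sketch of wiring the checkpoint into training
history = model.fit(
    x_train, y_train,
    validation_data=(x_valid, y_valid),
    batch_size=32,            # assumed value
    epochs=50,                # assumed value
    callbacks=[checkpoint],   # writes best_chkpt.hdf5 whenever val_loss improves
    verbose=1)
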
Example #4
def graph_eval(input_graph_def, input_node, output_node):
    """Run the frozen graph on the test images and print top-1 / top-5 accuracy."""

    # collect the paths of all test images
    test_img_paths = glob.glob(TEST_DIR + "/*/*.png")
    NUMEL = len(test_img_paths)
    assert (NUMEL > 0)

    y_test = np.zeros((NUMEL, 1), dtype="uint8")
    x_test = np.zeros((NUMEL, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3),
                      dtype="uint8")

    for i, img_path in enumerate(test_img_paths):
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        ##swap channels: from BGR to RGB
        #B, G, R = cv2.split(img)
        #img = cv2.merge([R, G, B])

        img_array = img_to_array(img, data_format=None)

        # the class name is the file name prefix before the first underscore
        filename = os.path.basename(img_path)
        class_name = filename.split("_")[0]
        label = cfg.labelNames_dict[class_name]

        x_test[i] = img_array
        y_test[i] = int(label)

    x_test = cfg.Normalize(x_test)

    x_test = np.reshape(x_test, [-1, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3])
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=cfg.NUM_CLASSES)

    tf.compat.v1.import_graph_def(input_graph_def, name='')

    # Get input placeholders & tensors
    images_in = tf.compat.v1.get_default_graph().get_tensor_by_name(
        input_node + ':0')
    labels = tf.compat.v1.placeholder(tf.int32, shape=[None, cfg.NUM_CLASSES])

    # get output tensors
    logits = tf.compat.v1.get_default_graph().get_tensor_by_name(output_node +
                                                                 ':0')

    # top 5 and top 1 accuracy
    in_top5 = tf.nn.in_top_k(predictions=logits,
                             targets=tf.argmax(labels, 1),
                             k=5)
    in_top1 = tf.nn.in_top_k(predictions=logits,
                             targets=tf.argmax(labels, 1),
                             k=1)
    top5_acc = tf.reduce_mean(tf.cast(in_top5, tf.float32))
    top1_acc = tf.reduce_mean(tf.cast(in_top1, tf.float32))

    # run the evaluation in a TF1-compatible session
    with tf.compat.v1.Session() as sess:

        sess.run(tf.compat.v1.initializers.global_variables())

        feed_dict = {images_in: x_test, labels: y_test}
        t5_acc, t1_acc = sess.run([top5_acc, top1_acc], feed_dict)
        print(' Top 1 accuracy with validation set: {:1.4f}'.format(t1_acc))
        print(' Top 5 accuracy with validation set: {:1.4f}'.format(t5_acc))

    print('FINISHED!')
    return
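
graph_eval expects an already-parsed GraphDef, so a caller has to load the frozen model from disk first. The sketch below shows that pattern; the file name and the output node name are placeholders, while the input node name matches the "conv2d_1_input" key used in the calibration functions above.

import tensorflow as tf

# hedged sketch: load a frozen graph and evaluate it
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile("frozen_graph.pb", "rb") as f:   # placeholder file name
    graph_def.ParseFromString(f.read())

graph_eval(graph_def,
           input_node="conv2d_1_input",    # input placeholder name
           output_node="dense_1/BiasAdd")  # placeholder: the model's logits node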