import os
import sys
from pathlib import Path

import cv2
import numpy as np
import tensorflow as tf


def main(input1):
    input1 = doParsing()
    # doParsing, cache, TestDataset and break_image are project helpers
    # (plausible sketches follow this example)
    # File names are saved into a cache file
    testDataset = cache(cache_path='my_dataset_cache_test.pkl',
                        fn=TestDataset,
                        input_dir=input1.input_dir)
    imagesOfTests = testDataset.images

    graph = tf.Graph()
    with graph.as_default():
        with tf.Session() as sess:
            # Import the saved model graph from the meta file
            try:
                file_ = Path(input1.meta_file)
                abs_path = file_.resolve()
            except FileNotFoundError:
                sys.exit('Meta file not found')
            else:
                imported_meta = tf.train.import_meta_graph(input1.meta_file)

            if os.path.isdir(input1.chk_point_dir):
                imported_meta.restore(
                    sess, tf.train.latest_checkpoint(input1.chk_point_dir))
            else:
                sys.exit('Checkpoint directory does not exist')

            x = graph.get_operation_by_name("x").outputs[0]
            predictions = graph.get_operation_by_name("predictions").outputs[0]

            # Take one image at a time, pass it through the network and save it
            for counter, image in enumerate(imagesOfTests):
                brokenImage, h, w, h_no, w_no = break_image(image, 128)
                theOutput = np.zeros((h_no * 128, w_no * 128, 3), dtype=np.uint8)

                feed_dict = {x: brokenImage}
                batchPredictions = sess.run(predictions, feed_dict=feed_dict)
                predictionOfM = batchPredictions.reshape((h_no, w_no))

                # Paint each 128x128 tile with the inverted per-tile prediction
                for i in range(0, h_no):
                    for j in range(0, w_no):
                        a = predictionOfM[i, j]
                        theOutput[128 * i:128 * (i + 1), 128 * j:128 * (j + 1), :] = 1 - a

                # Crop to a whole number of tiles and apply the mask
                newImages = image[0:h_no * 128, 0:w_no * 128, :]
                prediction = np.multiply(theOutput, newImages)
                cv2.imwrite(os.path.join(input1.save_dir,
                                         'outfile_{}.jpg'.format(counter + 1)),
                            prediction)
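
All of the examples on this page lean on project helpers that are not shown here: the argument parser (doParsing / parse_arguments), the dataset class (TestDataset / Dataset_test), cache, and break_image. Below is a minimal sketch of plausible cache and break_image implementations, inferred purely from the call sites above; the real project's versions may differ.

import os
import pickle

import numpy as np


def cache(cache_path, fn, **kwargs):
    # Build the object once, pickle it, and reuse the pickle on later runs
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    obj = fn(**kwargs)
    with open(cache_path, 'wb') as f:
        pickle.dump(obj, f)
    return obj


def break_image(image, tile=128):
    # Crop the image to a whole number of tiles and stack the tile-by-tile
    # patches into one batch, row-major, so that reshape((h_no, w_no)) on
    # the per-tile predictions lines them back up with the image grid
    h, w = image.shape[:2]
    h_no, w_no = h // tile, w // tile
    tiles = [image[i * tile:(i + 1) * tile, j * tile:(j + 1) * tile, :]
             for i in range(h_no) for j in range(w_no)]
    return np.stack(tiles), h, w, h_no, w_no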
Example #2
def main(args):
    #File names are saved into a cache file
    args = parse_arguments()
    dataset_test = cache(cache_path='my_dataset_cache_test.pkl',
                         fn=Dataset_test,
                         in_dir=args.in_dir)
    test_images = dataset_test.images

    graph = tf.Graph()
    with graph.as_default():
        with tf.Session() as sess:
            # Import the saved model graph from the meta file
            try:
                file_ = Path(args.meta_file)
                abs_path = file_.resolve()
            except FileNotFoundError:
                sys.exit('Meta file not found')
            else:
                imported_meta = tf.train.import_meta_graph(args.meta_file)

            if os.path.isdir(args.chk_point_dir):
                imported_meta.restore(
                    sess, tf.train.latest_checkpoint(args.chk_point_dir))
            else:
                sys.exit("Check Point Directory does not exist")

            # Recover the input placeholder and the output tensor by name
            x = graph.get_operation_by_name("x").outputs[0]
            predictions = graph.get_operation_by_name("predictions").outputs[0]

            #Take one image at a time, pass it through the network and save it
            for counter, image in enumerate(test_images):
                broken_image, h, w, h_no, w_no = break_image(image, 128)

                output_image = np.zeros((h_no * 128, w_no * 128, 3),
                                        dtype=np.uint8)

                feed_dict = {x: broken_image}
                batch_predictions = sess.run(predictions, feed_dict=feed_dict)

                matrix_pred = batch_predictions.reshape((h_no, w_no))
                #Post-processing: turn the per-tile predictions into a binary mask
                for i in range(0, h_no):
                    for j in range(0, w_no):

                        a = matrix_pred[i, j]
                        output_image[128 * i:128 * (i + 1),
                                     128 * j:128 * (j + 1), :] = 1 - a

                cropped_image = image[0:h_no * 128, 0:w_no * 128, :]
                pred_image = np.multiply(output_image, cropped_image)

                print("Saved {} Image(s)".format(counter + 1))
                cv2.imwrite(
                    os.path.join(args.save_dir,
                                 'outfile_{}.jpg'.format(counter + 1)),
                    pred_image)
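
Every example also begins with parse_arguments(). A plausible argparse reconstruction follows, assuming only the four attributes the code actually reads (in_dir, meta_file, chk_point_dir, save_dir); the flag names and defaults are guesses:

import argparse


def parse_arguments():
    parser = argparse.ArgumentParser(
        description='Run a trained crack-detection model over test images')
    parser.add_argument('--in_dir', required=True,
                        help='directory containing the test images')
    parser.add_argument('--meta_file', required=True,
                        help='path to the saved .meta graph file')
    parser.add_argument('--chk_point_dir', required=True,
                        help='directory holding the training checkpoints')
    parser.add_argument('--save_dir', default='./Save',
                        help='directory the masked output images are written to')
    return parser.parse_args()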
Example #4
def main(args):

    #File names are saved into a cache file
    args = parse_arguments()
    dataset_test = cache(cache_path='my_dataset_cache_test.pkl',
                         fn=Dataset_test,
                         in_dir=args.in_dir)
    test_images = dataset_test.images
    nums = len(test_images)  # number of test images
    graph = tf.Graph()
    with graph.as_default():
        with tf.Session() as sess:
            # Import the saved model graph from the meta file
            try:
                file_ = Path(args.meta_file)
                abs_path = file_.resolve()
            except FileNotFoundError:
                sys.exit('Meta file not found')
            else:
                imported_meta = tf.train.import_meta_graph(args.meta_file)

            if os.path.isdir(args.chk_point_dir):
                imported_meta.restore(
                    sess, tf.train.latest_checkpoint(args.chk_point_dir))
            else:
                sys.exit("Check Point Directory does not exist")

            x = graph.get_operation_by_name("x").outputs[0]
            predictions = graph.get_operation_by_name("predictions").outputs[0]

            # leng[0] holds the image count; per-image length estimates follow
            leng = [nums]
            #Take one image at a time, pass it through the network and save it
            for counter, image in enumerate(test_images):
                cnt = 0
                broken_image, h, w, h_no, w_no = break_image(image, 128)

                output_image = np.zeros((h_no * 128, w_no * 128, 3),
                                        dtype=np.uint8)

                feed_dict = {x: broken_image}
                batch_predictions = sess.run(predictions, feed_dict=feed_dict)

                matrix_pred = batch_predictions.reshape((h_no, w_no))
                #Post-processing: build the binary mask and count the crack tiles
                for i in range(0, h_no):
                    for j in range(0, w_no):
                        a = matrix_pred[i, j]
                        output_image[128 * i:128 * (i + 1),
                                     128 * j:128 * (j + 1), :] = 1 - a
                        if 1 - a == 1:  # tile classified as crack (a == 0)
                            cnt += 1

                cropped_image = image[0:h_no * 128, 0:w_no * 128, :]
                pred_image = np.multiply(output_image, cropped_image)

                print("Saved {} Image(s)".format(counter + 1))
                leng.append(cnt * 128 * 1.414)  # crack tiles * 128 px * 1.414 (~sqrt(2), the tile diagonal)
                cv2.imwrite(
                    os.path.join(args.save_dir,
                                 'outfile_{}.jpg'.format(counter + 1)),
                    pred_image)

            index = 0
            for img in os.listdir("./Save"):  # read all the images in dir "Save"
                # skip hidden files
                if not img.startswith('.'):
                    index += 1
                    # read the image as grayscale
                    img1 = cv2.imread(os.path.join("./Save", img),
                                      cv2.IMREAD_GRAYSCALE)
                    # get the shape
                    size = img1.shape
                    lengthcnt = 0
                    widthcnt = 0
                    length = [0] * size[1]  # columns already seen with a crack pixel
                    width = [0] * size[0]   # rows already seen with a crack pixel
                    for i in range(size[0]):
                        for j in range(size[1]):
                            # count each crack-containing column and row once
                            if img1[i][j] > 0 and length[j] == 0:
                                length[j] = 1
                                lengthcnt += 1
                            if img1[i][j] > 0 and width[i] == 0:
                                width[i] = 1
                                widthcnt += 1

                    # look up this image's stored length estimate: leng[0] is the
                    # image count, so this mapping assumes os.listdir returns the
                    # files in reverse of the order they were written
                    len_vec = leng
                    totLen = len_vec[len_vec[0] - index + 1]

                    print(
                        img +
                        ": the crack's horizontal projection is %d pixels (%.2f%% of the image width); its vertical projection is %d pixels (%.2f%% of the image height); total crack length is about %.3f pixels"
                        % (lengthcnt, lengthcnt * 100.0 / size[1], widthcnt,
                           widthcnt * 100.0 / size[0], totLen))
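
For a sense of scale of the leng estimate in this example: each 128x128 tile flagged as crack contributes 128 * 1.414 (roughly the tile diagonal, 128 * sqrt(2) ~ 181 px) to the total. A worked example with a hypothetical tile count:

# Suppose 5 tiles were classified as crack in one image:
cnt = 5
est = cnt * 128 * 1.414   # 5 tiles * 128 px * sqrt(2) = 904.96, i.e. ~905 px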