    def __init__(self, model, thresh=0.5):
        self.meta = {
            'object_scale': 5,
            'classes': 1,
            'out_size': [17, 17, 30],
            'colors': [(0, 0, 254)],
            'thresh': thresh,
            'anchors': [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52],
            'num': 5,
            'labels': ['figure']
        }
        self.graph = load_graph(model)
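# load_graph is not defined in this excerpt. A minimal sketch of such a
# helper, assuming the model is a frozen TF1 GraphDef (.pb) file; the exact
# loading logic used by the project may differ:
import tensorflow as tf

def load_graph(model_path):
    """Load a frozen TensorFlow graph from a .pb file."""
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(model_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        # Import the serialized graph into a fresh Graph object
        tf.import_graph_def(graph_def, name='')
    return graph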
# annotation settings
annotate = False
if args.annotate == 0:
    pass
elif args.annotate == 1:
    annotate = True
else:
    print("Warning! This might be an invalid annotation config")

# load graph
graph = load_graph(args.model)

# list image files
images = os.listdir(args.images)
sub_figures = []

with tf.Session(graph=graph) as sess:
    print("---------------")
    print("Input directory: %s" % args.images)
    print("The directory has %s images" % len(images))
    print("Extraction started")
    for img_file in images:
        # load image
        imgcv, imgcv_resized, img_input = preprocess(args.images + "/" + img_file)
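# preprocess is likewise not shown in this excerpt. Given the 17x17 output
# grid in the detection metadata, a stride-32 YOLOv2-style network would take
# 544x544 input; the input size, BGR->RGB swap, and [0, 1] scaling below are
# assumptions, not details confirmed by this code:
import cv2
import numpy as np

def preprocess(img_path, net_size=544):
    """Read an image and build the three values the loop above expects."""
    imgcv = cv2.imread(img_path)                     # original BGR image
    imgcv_resized = cv2.resize(imgcv, (net_size, net_size))
    img_input = imgcv_resized[:, :, ::-1] / 255.0    # BGR -> RGB, scale to [0, 1]
    img_input = np.expand_dims(img_input, axis=0)    # add batch dimension
    return imgcv, imgcv_resized, img_input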