Example #1
# NOTE (assumption): `dataDir` is the dataset root directory and the VQA
# annotation helper class is importable as `VQA`; neither is defined in this
# excerpt.
import os
import random

import matplotlib.pyplot as plt
import skimage.io as io

split = 'train'
annFile = '%s/Annotations/%s.json' % (dataDir, split)
imgDir = '%s/Images/' % dataDir

# initialize VQA api for QA annotations
vqa = VQA(annFile)

# load and display QA annotations for given answer types
"""
ansTypes can be one of the following
yes/no
number
other
unanswerable
"""
anns = vqa.getAnns(ansTypes='yes/no')
randomAnn = random.choice(anns)
vqa.showQA([randomAnn])
imgFilename = randomAnn['image']
if os.path.isfile(imgDir + imgFilename):
	I = io.imread(imgDir + imgFilename)
	plt.imshow(I)
	plt.axis('off')
	plt.show()

# load and display QA annotations for given images
imgs = vqa.getImgs()
anns = vqa.getAnns(imgs=imgs)
randomAnn = random.choice(anns)
vqa.showQA([randomAnn])
imgFilename = randomAnn['image']
if os.path.isfile(imgDir + imgFilename):
	I = io.imread(imgDir + imgFilename)
	plt.imshow(I)
	plt.axis('off')
	plt.show()
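
# Minimal follow-up sketch (assumption): each annotation dict also carries an
# 'answer_type' field, as in the standard VQA/VizWiz annotation format, so the
# loaded annotations can be tallied per answer type.
from collections import Counter
print(Counter(ann.get('answer_type', 'unknown') for ann in vqa.getAnns()))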
Example #2
                 for i in np.arange(0, 256)]).astype("uint8")
lut2 = np.array([np.random.randint(0, 255)
                 for i in np.arange(0, 256)]).astype("uint8")
lut3 = np.array([np.random.randint(0, 255)
                 for i in np.arange(0, 256)]).astype("uint8")
lut = np.dstack((lut1, lut2, lut3))

np.random.seed(7)

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

vqa = VQA(ANNFILE)
anns = vqa.getAnns()

frame = np.zeros((UI_Y2 + 1, UI_X4 + 1, 3), np.uint8)
line_color = (255, 255, 255)
# Horizontal lines
frame[UI_Y0, UI_X0:UI_X4, :] = line_color
frame[UI_Y1, UI_X0:UI_X3, :] = line_color
frame[UI_Y2, UI_X0:UI_X4, :] = line_color
# Vertical lines
frame[UI_Y0:UI_Y2, UI_X0, :] = line_color
frame[UI_Y0:UI_Y1, UI_X1, :] = line_color
frame[UI_Y0:UI_Y1, UI_X2, :] = line_color
frame[UI_Y0:UI_Y2, UI_X3, :] = line_color

cv2.imshow("ShadowWorld", frame)
cv2.moveWindow("ShadowWorld", 10, 10)