def get_cls(data_path=r'/data0/linjingkai/DataSet/AWA/testclasses.txt'):
    """Read class names, one per line, from *data_path*.

    Fixes over the original: the file handle is closed even on error (context
    manager instead of a bare ``close``), and a blank line in the middle of
    the file no longer terminates reading early — blank lines are skipped.
    The path is now a parameter with the original hard-coded value as its
    default, so existing callers are unaffected.

    Args:
        data_path: text file containing one class name per line.

    Returns:
        list[str]: the non-empty class names, in file order.
    """
    with io.open(data_path, 'r') as cls_file:
        # rstrip('\n') mirrors the original per-line cleanup; empty results
        # (blank lines) are dropped rather than stopping the scan.
        cls_list = [name
                    for name in (line.rstrip('\n') for line in cls_file)
                    if name]
    # Original progress printout kept byte-for-byte (runtime string).
    print("当前运行类别为:", cls_list)
    print(" ")
    return cls_list
if __name__ == '__main__':
    #pass in the username of the account you want to download
    get_all_tweets("@AngelAlessandra")
    import io
    # Instantiates a client
    client = vision.ImageAnnotatorClient()
    # Label the numbered images 001.jpg .. 024.jpg that sit next to this script.
    for i in range(1, 25):
        # The name of the image file to annotate
        file_name = os.path.join(os.path.dirname(__file__), '%03d.jpg' % (i))
        # NOTE(review): non-raw Windows path relies on "\W", "\F", "\A" being
        # invalid escapes that Python leaves untouched — a raw string would be
        # safer; confirm before changing (runtime string kept as-is here).
        font = ImageFont.truetype("C:\Windows\Fonts\Arial.ttf", 30)  #font
        im = Image.open(file_name)  #drawopen
        # Loads the image into memory
        with io.open(file_name, 'rb') as image_file:
            content = image_file.read()
        image = types.Image(content=content)
        # Performs label detection on the image file
        response = client.label_detection(image=image)
        labels = response.label_annotations
        print('image%dlabels:' % i)
        # NOTE(review): j counts labels (and, starting at 1, ends up at
        # len(labels) + 1) but is never read afterwards in this excerpt.
        j = 1
        for label in labels:
            j = j + 1
            print(label.description)
            print(label.score)
        # Prepare a drawing context on the opened image; any actual drawing
        # presumably happens outside this excerpt.
        draw = ImageDraw.Draw(im)
annFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)

# In[3]:

# initialize COCO api for instance annotations
coco = COCO(annFile)

# get all images containing the 'person' category
catIds = coco.getCatIds(catNms=['person'])
imgIds = coco.getImgIds(catIds=catIds)
data = []
# BUGFIX: the original `range(1, len(imgIds))` silently skipped imgIds[0];
# iterate every image and keep a 1-based progress display.
# Also modernized from Python 2 `print` statements / `unicode()` — the rest
# of this file is Python 3 (it catches FileNotFoundError, uses print()).
for i in range(len(imgIds)):
    print("Processing image ", i + 1, "/", len(imgIds))
    id_image = imgIds[i]
    img = coco.loadImgs(id_image)[0]
    print(img['file_name'])
    # Copy the image into the person-only collection folder.
    shutil.copy('%s/images/%s/%s' % (dataDir, dataType, img['file_name']),
                '../person_image')
    annIds = coco.getAnnIds(imgIds=id_image, catIds=catIds, iscrowd=None)
    anns = coco.loadAnns(annIds)
    # Every annotation's bounding box for this image.
    bbox_list = [ann['bbox'] for ann in anns]
    data.append({'file_name': img['file_name'],
                 'image_id': id_image,
                 'bbox': bbox_list})

import io, json
# json.dumps already returns str on Python 3, so no unicode() wrapper needed.
with io.open('bboxes.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(data, ensure_ascii=False))
def run(self):
    """Worker entry point: scan self.coco_path for image files and ImageJ
    ROI .zip archives, then convert every matching ROI into VIA-style
    polygon annotations in "via_region_data.json".

    Reads:  self.coco_path, self.txt (image filename suffix to match).
    Emits:  progressBar / progressBar_setMaximum / append_coco Qt signals.
    Writes: via_region_data.json in self.coco_path; the file is re-read and
            re-written every image iteration so regions accumulate.
    """
    os.chdir(self.coco_path)
    #self.append_coco.emit("Current Path: "+self.coco_path)
    path = "."
    # ROI arrays
    filenames = []
    zips = []
    dirs = []
    # scanning: collect image files (containing self.txt) and ROI zips.
    # NOTE(review): `for dir in d` iterates the (dirpath, dirnames,
    # filenames) tuple from os.walk and the inner loop rebinds d/f —
    # verify this nested walk visits the directories actually intended.
    for d in os.walk(path):
        for dir in d:
            for r, d, f in os.walk(str(dir)):
                for file in f:
                    if self.txt in file:
                        filenames.append(os.path.join(r, file))
                    elif ".zip" in file:
                        zips.append(os.path.join(r, file))
    # Sorting
    zips.sort()
    filenames.sort()
    # looping and decoding...
    #self.append_coco.emit(zips)
    self.progressBar_setMaximum.emit(len(filenames))
    # Detect zero-based file numbering (then ROI numbers are shifted by one).
    first_filenum = int(filenames[0].replace(
        self.txt, "").replace("-", " ").split(" ")[-1])
    has_zero = False
    if first_filenum == 0:
        has_zero = True
    else:
        has_zero = False
    for j in range(len(zips)):
        for i in range(len(filenames)):
            self.progressBar.emit(i + 1)
            # declare ROI file
            roi = read_roi.read_roi_zip(zips[j])
            roi_list = list(roi.values())
            # ROI related file informations
            filename = filenames[i].replace("./", "")
            im = cv2.imread("./" + filename)
            h, w, c = im.shape
            size = os.path.getsize("./" + filename)
            try:
                # Re-read any existing output so new regions accumulate.
                f = open("via_region_data.json")
                original = json.loads(f.read())
                #self.append_coco.emit("Writing..."+str(zips[j]))
                # Do something with the file
            except FileNotFoundError:
                #self.append_coco.emit("File not existed, creating new file...")
                original = {}
            # Skeleton VIA record for this image, keyed by filename+size.
            data = {
                filename + str(size): {
                    "fileref": "",
                    "size": size,
                    "filename": filename,
                    "base64_img_data": "",
                    "file_attributes": {},
                    "regions": {},
                }
            }
            # write json
            for a in roi_list:
                try:
                    filename2 = filename.replace(
                        self.txt, "").replace("-", " ").split(" ")
                    roi_name = a["name"].replace(
                        "-", " ").split(" ")
                    roi_num = int(roi_name[0])
                    file_num = int(filename2[-1])
                    if has_zero:
                        roi_num -= 1
                    # Attach only ROIs whose leading number matches this image.
                    if file_num == roi_num:
                        #print(has_zero)
                        #print(int(filename2[-1]), " ", roi_num)
                        #print(a)
                        x_list = a["x"]
                        y_list = a["y"]
                        # Clamp polygon points to the image bounds.
                        # NOTE(review): clamps to w/h, not w-1/h-1 as
                        # run_func does — confirm which is intended.
                        for l in range(len(x_list)):
                            if x_list[l] >= w:
                                x_list[l] = w
                        # self.append_coco.emit(x_list[j])
                        for k in range(len(y_list)):
                            if y_list[k] >= h:
                                y_list[k] = h
                        #self.append_coco.emit(y_list[k])
                        # parameters
                        # Close the polygon by repeating the first point.
                        x_list.append(a["x"][0])
                        y_list.append(a["y"][0])
                        regions = {
                            str(a): {
                                "shape_attributes": {
                                    "name": "polygon",
                                    "all_points_x": x_list,
                                    "all_points_y": y_list,
                                },
                                "region_attributes": {
                                    "name": dirname(dir)
                                },
                            }
                        }
                        data[filename + str(size)]["regions"].update(
                            regions)
                        original.update(data)
                except KeyError:
                    # Line Exception: ROI has no "x"/"y" keys — handle the
                    # line and oval ROI types explicitly instead.
                    if a['type'] == "line":
                        x1 = a['x1']
                        x2 = a['x2']
                        y1 = a['y1']
                        y2 = a['y2']
                        width = a['width']
                        new_x_list = []
                        new_y_list = []
                        if (x1 - x2) == 0:
                            slope = 0
                        else:
                            slope = (y1 - y2) / (x1 - x2)
                        # Perpendicular slope used to thicken the line into a
                        # polygon. NOTE(review): divides by zero when the line
                        # is horizontal (slope == 0) — confirm inputs exclude
                        # that case.
                        slope_a = (-1) / slope
                        midpoint = [(x1 + x2) / 2, (y1 + y2) / 2]
                        #print("slope: ", slope)
                        #print(slope_a)
                        # Solve for the half-width offset along the perpendicular.
                        x = Symbol('x')
                        weight = solve(
                            x**2 + (x * slope_a)**2 - (width / 2)**2, x)
                        new_x_list.append(int(x1 - (weight[1])))
                        new_x_list.append(int(x1 + (weight[1])))
                        new_x_list.append(int(x2 - (weight[0])))
                        new_x_list.append(int(x2 + (weight[0])))
                        new_x_list.append(int(x1 - (weight[1])))
                        new_y_list.append(
                            int(y1 - (weight[1] * slope_a)))
                        new_y_list.append(
                            int(y1 + (weight[1] * slope_a)))
                        new_y_list.append(
                            int(y2 - (weight[0] * slope_a)))
                        new_y_list.append(
                            int(y2 + (weight[0] * slope_a)))
                        new_y_list.append(
                            int(y1 - (weight[1] * slope_a)))
                        #print("x coords", new_x_list)
                        #print("y coords", new_y_list)
                        #fix index out of bound exception
                        # NOTE(review): 2160 hard-codes the image size here;
                        # run_func uses the actual w/h instead — confirm.
                        for j in range(len(new_x_list)):
                            if (new_x_list[j] >= 2160):
                                new_x_list[j] = 2159
                                #print(new_x_list[j])
                        for k in range(len(new_y_list)):
                            if (new_y_list[k] >= 2160):
                                new_y_list[k] = 2159
                                #print(new_y_list[k])
                        regions = {
                            str(a): {
                                "shape_attributes": {
                                    "name": "polygon",
                                    "all_points_x": new_x_list,
                                    "all_points_y": new_y_list
                                },
                                "region_attributes": {
                                    "name": dirname(dir)
                                }
                            }
                        }
                        data[filename + str(size)]["regions"].update(
                            regions)
                        original.update(data)
                    elif a['type'] == "oval":
                        # Approximate the ellipse with a 128-gon.
                        TWO_PI = np.pi * 2
                        angles = 128
                        angle_shift = TWO_PI / angles
                        phi = 0
                        center_x = (2 * (a['left']) + a['width']) / 2
                        center_y = (2 * a['top'] + a['height']) / 2
                        x_list = []
                        y_list = []
                        for i in range(angles):
                            phi += angle_shift
                            x_list.append(
                                int(center_x + (a['width'] *
                                                np.cos(phi) / 2)))
                            y_list.append(
                                int(center_y + (a['height'] *
                                                np.sin(phi) / 2)))
                        print(x_list)
                        print(y_list)
                        regions = {
                            str(a): {
                                "shape_attributes": {
                                    "name": "polygon",
                                    "all_points_x": x_list,
                                    "all_points_y": y_list,
                                },
                                "region_attributes": {
                                    "name": dirname(dir)
                                },
                            }
                        }
                        data[filename + str(size)]["regions"].update(
                            regions)
                        original.update(data)
                except IndexError:
                    self.append_coco.emit(
                        "[ERROR] Can't find any type specific files! (Maybe check the file type)"
                    )
            # Persist the accumulated annotations after each image.
            with io.open("via_region_data.json", "w", encoding="utf-8") as f:
                f.write(
                    json.dumps(original, ensure_ascii=False))
    self.append_coco.emit("[INFO] Converted Successfully!")
def run_func(self, zips, filenames, json_name, folder):
    """Convert ImageJ ROI .zip archives into VIA-style polygon annotations.

    For every (zip, image) pair, each ROI is matched to its image by the
    numeric suffix in the filename; polygon, line, and oval ROIs are turned
    into closed polygons clamped to the image bounds and accumulated into
    ``json_name``.

    Args:
        zips:      ROI .zip archive paths, relative to self.coco_path.
        filenames: image file paths, relative to self.coco_path.
        json_name: output JSON path; re-read each iteration so regions accumulate.
        folder:    path whose dirname() becomes each region's label.

    Side effects: reads images via cv2, rewrites ``json_name`` after every
    image, prints progress, emits append_coco on errors. Requires
    self.coco_path and self.txt.
    """
    count2 = 0
    # Sorting keeps the zip/image pairing deterministic.
    zips.sort()
    filenames.sort()
    # Zero-based numbering detection is loop-invariant, so compute it once
    # (the original recomputed it for every single ROI). Guarded so an empty
    # filenames list still skips the loops instead of raising here.
    has_zero = bool(filenames) and int(filenames[0].replace(
        self.txt, "").replace("-", " ").split(" ")[-1]) == 0
    for zip_idx in range(len(zips)):
        for file_idx in range(len(filenames)):
            count2 += 1
            # declare ROI file
            roi = read_roi.read_roi_zip(self.coco_path + "/" + zips[zip_idx])
            roi_list = list(roi.values())
            # ROI related file informations
            filename = filenames[file_idx].replace("./", "")
            im = cv2.imread(self.coco_path + '/' + filename)
            h, w, c = im.shape
            size = os.path.getsize(self.coco_path + '/' + filename)
            try:
                # Re-read any existing output so regions accumulate; the
                # context manager replaces the original leaked file handle.
                with open(json_name) as f:
                    original = json.loads(f.read())
            except ValueError:  # includes simplejson.decoder.JSONDecodeError
                print('Decoding JSON has failed')
                # BUGFIX: `original` was left unbound here, causing a
                # NameError below; start over with an empty annotation set.
                original = {}
            except FileNotFoundError:
                original = {}
            # Skeleton VIA record for this image, keyed by filename+size.
            data = {
                filename + str(size): {
                    "fileref": "",
                    "size": size,
                    "filename": filename,
                    "base64_img_data": "",
                    "file_attributes": {},
                    "regions": {},
                }
            }
            for a in roi_list:
                try:
                    filename2 = filename.replace(self.txt, "").replace(
                        "-", " ").split(" ")
                    roi_name = a["name"].replace("-", " ").split(" ")
                    roi_num = int(roi_name[0])
                    file_num = int(filename2[-1])
                    if has_zero:
                        roi_num -= 1
                    # Attach only ROIs whose leading number matches this image.
                    if file_num == roi_num:
                        x_list = a["x"]
                        y_list = a["y"]
                        # Clamp polygon points to the image bounds.
                        for l in range(len(x_list)):
                            if x_list[l] >= w:
                                x_list[l] = w - 1
                        for k in range(len(y_list)):
                            if y_list[k] >= h:
                                y_list[k] = h - 1
                        # Close the polygon by repeating the first point.
                        x_list.append(a["x"][0])
                        y_list.append(a["y"][0])
                        regions = {
                            str(a): {
                                "shape_attributes": {
                                    "name": "polygon",
                                    "all_points_x": x_list,
                                    "all_points_y": y_list,
                                },
                                "region_attributes": {
                                    "name": dirname(folder)
                                },
                            }
                        }
                        data[filename + str(size)]["regions"].update(regions)
                        original.update(data)
                except KeyError:
                    # ROI has no "x"/"y" keys: handle line and oval types.
                    if a['type'] == "line":
                        x1 = a['x1']
                        x2 = a['x2']
                        y1 = a['y1']
                        y2 = a['y2']
                        width = a['width']
                        new_x_list = []
                        new_y_list = []
                        if (x1 - x2) == 0:
                            slope = 0
                        else:
                            slope = (y1 - y2) / (x1 - x2)
                        # Perpendicular slope used to thicken the line into a
                        # rectangle-like polygon.
                        slope_a = (-1) / slope
                        midpoint = [(x1 + x2) / 2, (y1 + y2) / 2]
                        # Solve for the half-width offset along the perpendicular.
                        x = Symbol('x')
                        weight = solve(
                            x**2 + (x * slope_a)**2 - (width / 2)**2, x)
                        new_x_list.append(int(x1 - (weight[1])))
                        new_x_list.append(int(x1 + (weight[1])))
                        new_x_list.append(int(x2 - (weight[0])))
                        new_x_list.append(int(x2 + (weight[0])))
                        new_x_list.append(int(x1 - (weight[1])))
                        new_y_list.append(int(y1 - (weight[1] * slope_a)))
                        new_y_list.append(int(y1 + (weight[1] * slope_a)))
                        new_y_list.append(int(y2 - (weight[0] * slope_a)))
                        new_y_list.append(int(y2 + (weight[0] * slope_a)))
                        new_y_list.append(int(y1 - (weight[1] * slope_a)))
                        # BUGFIX: x coordinates were clamped against the image
                        # *height* and y against the *width*; clamp x to the
                        # width and y to the height instead. The fresh loop
                        # names also stop shadowing the outer zip/file indices.
                        for px in range(len(new_x_list)):
                            if new_x_list[px] >= w:
                                new_x_list[px] = w - 1
                        for py in range(len(new_y_list)):
                            if new_y_list[py] >= h:
                                new_y_list[py] = h - 1
                        regions = {
                            str(a): {
                                "shape_attributes": {
                                    "name": "polygon",
                                    "all_points_x": new_x_list,
                                    "all_points_y": new_y_list
                                },
                                "region_attributes": {
                                    "name": dirname(folder)
                                }
                            }
                        }
                        data[filename + str(size)]["regions"].update(regions)
                        original.update(data)
                    elif a['type'] == "oval":
                        # Approximate the ellipse with a 128-gon.
                        TWO_PI = np.pi * 2
                        angles = 128
                        angle_shift = TWO_PI / angles
                        phi = 0
                        center_x = (2 * (a['left']) + a['width']) / 2
                        center_y = (2 * a['top'] + a['height']) / 2
                        x_list = []
                        y_list = []
                        # `ang` avoids shadowing the enclosing image index.
                        for ang in range(angles):
                            phi += angle_shift
                            x_list.append(
                                int(center_x + (a['width'] * np.cos(phi) / 2)))
                            y_list.append(
                                int(center_y + (a['height'] * np.sin(phi) / 2)))
                        regions = {
                            str(a): {
                                "shape_attributes": {
                                    "name": "polygon",
                                    "all_points_x": x_list,
                                    "all_points_y": y_list,
                                },
                                "region_attributes": {
                                    "name": dirname(folder)
                                },
                            }
                        }
                        data[filename + str(size)]["regions"].update(regions)
                        original.update(data)
                # BUGFIX: `except IndexError or FileNotFoundError` evaluates
                # the `or` first and only ever caught IndexError; catch both.
                except (IndexError, FileNotFoundError):
                    self.append_coco.emit(
                        "[ERROR] Can't find any type specific files! (Maybe check the file type)"
                    )
            # Persist the accumulated annotations after each image.
            with io.open(json_name, "w", encoding="utf-8") as f:
                f.write(json.dumps(original, ensure_ascii=False))
    print("Converted File: ", json_name)
# initialize COCO api for instance annotations
coco = COCO(annFile)

# get all images containing the 'person' category
catIds = coco.getCatIds(catNms=['person'])
imgIds = coco.getImgIds(catIds=catIds)
data = []
# BUGFIX: the original `range(1, len(imgIds))` silently skipped imgIds[0];
# iterate every image and keep a 1-based progress display.
# Also modernized from Python 2 `print` statements / `unicode()` — the rest
# of this file is Python 3 (it catches FileNotFoundError, uses print()).
for i in range(len(imgIds)):
    print("Processing image ", i + 1, "/", len(imgIds))
    id_image = imgIds[i]
    img = coco.loadImgs(id_image)[0]
    print(img['file_name'])
    # Copy the image into the person-only collection folder.
    shutil.copy('%s/images/%s/%s' % (dataDir, dataType, img['file_name']),
                '../person_image')
    annIds = coco.getAnnIds(imgIds=id_image, catIds=catIds, iscrowd=None)
    anns = coco.loadAnns(annIds)
    # Every annotation's bounding box for this image.
    bbox_list = [ann['bbox'] for ann in anns]
    data.append({
        'file_name': img['file_name'],
        'image_id': id_image,
        'bbox': bbox_list
    })

import io, json
# json.dumps already returns str on Python 3, so no unicode() wrapper needed.
with io.open('bboxes.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(data, ensure_ascii=False))
def zip2coco(self):
    """Scan self.coco_path for image files and ImageJ ROI .zip archives and
    write VIA-style polygon annotations to "via_region_data.json".

    Reads:  self.coco_path, self.format_txt (Qt text widget holding the
            image filename suffix to match).
    Writes: via_region_data.json in self.coco_path; re-read and re-written
            every image iteration so regions accumulate.
    """
    self.get_coco()
    os.chdir(self.coco_path)
    path = "."
    # ROI arrays
    filenames = []
    zips = []
    dirs = []
    # scanning: collect image files and ROI zip archives.
    # NOTE(review): `for dir in d` iterates the (dirpath, dirnames,
    # filenames) tuple from os.walk — verify this nested walk visits the
    # intended directories.
    for d in os.walk(path):
        for dir in d:
            for r, d, f in os.walk(str(dir)):
                for file in f:
                    if self.format_txt.toPlainText() in file:
                        filenames.append(os.path.join(r, file))
                    elif ".zip" in file:
                        zips.append(os.path.join(r, file))
    # Sorting
    zips.sort()
    filenames.sort()
    # looping and decoding...
    print(zips)
    for j in range(len(zips)):
        for i in range(len(filenames)):
            # declare ROI file
            roi = read_roi.read_roi_zip(zips[j])
            roi_list = list(roi.values())
            # ROI related file informations
            filename = filenames[i].replace(".\\", "").replace(".\\", "")
            im = cv2.imread(".\\" + filename)
            h, w, c = im.shape
            size = os.path.getsize(filename)
            try:
                # Re-read any existing output so new regions accumulate.
                f = open("via_region_data.json")
                original = json.loads(f.read())
                #print("Writing..."+str(zips[j]))
                # Do something with the file
            except FileNotFoundError:
                print("File not exisited, creating new file...")
                original = {}
            # Skeleton VIA record for this image, keyed by filename+size.
            data = {
                filename + str(size): {
                    "fileref": "",
                    "size": size,
                    "filename": filename,
                    "base64_img_data": "",
                    "file_attributes": {},
                    "regions": {},
                }
            }
            # write json
            length = len(list(roi.values()))
            self.progressBar.setMaximum(length)
            for a in range(length):
                filename2 = filename.replace(
                    self.format_txt.toPlainText(),
                    "").replace("-", " ").split(" ")
                roi_name = roi_list[a]["name"].replace(
                    "-", " ").split(" ")
                filenum = ""
                # Zero-pad the numeric filename part to four digits so it can
                # be compared with the ROI name prefix.
                # NOTE(review): filename2 is a *list*, so int(filename2[:1])
                # calls int() on a list slice, which raises TypeError — the
                # sibling run_func uses int(filename2[-1]); confirm intent
                # before relying on this method.
                if int(filename2[:1]) > 10 and int(
                        filename2[:1]) < 100:
                    filenum = "00" + str(int(filename2[:1]))
                elif int(filename2[:1]) > 100 and int(
                        filename2[:1]) < 1000:
                    filenum = "0" + str(int(filename2[:1]))
                elif int(filename2[:1]) > 1 and int(
                        filename2[:1]) < 10:
                    filenum = "000" + str(int(filename2[:1]))
                elif int(filename2[:1]) > 1000 and int(
                        filename2[:1]) < 10000:
                    filenum = str(int(filename2[:1]))
                # Attach only ROIs whose padded number matches this image.
                if filenum == roi_name[0]:
                    print("roi_name: ", roi_name[0], "filename: ", filenum)
                    x_list = roi_list[a]["x"]
                    y_list = roi_list[a]["y"]
                    # Clamp polygon points to the image bounds.
                    # NOTE(review): clamps to w/h, not w-1/h-1 as run_func
                    # does — confirm which is intended.
                    for l in range(len(x_list)):
                        if x_list[l] >= w:
                            x_list[l] = w
                    # print(x_list[j])
                    for k in range(len(y_list)):
                        if y_list[k] >= h:
                            y_list[k] = h
                    # print(y_list[k])
                    # parameters
                    # Close the polygon by repeating the first point.
                    x_list.append(roi_list[a]["x"][0])
                    y_list.append(roi_list[a]["y"][0])
                    regions = {
                        str(a): {
                            "shape_attributes": {
                                "name": "polygon",
                                "all_points_x": x_list,
                                "all_points_y": y_list,
                            },
                            "region_attributes": {
                                "name":
                                dirname(dir).replace("-ROI", " ") + "-" +
                                str(j)
                            },
                        }
                    }
                    data[filename + str(size)]["regions"].update(regions)
                    original.update(data)
            # Persist the accumulated annotations after each image.
            with io.open("via_region_data.json", "w",
                         encoding="utf-8") as f:
                f.write(json.dumps(original, ensure_ascii=False))