def __init__(
    self,
    root_folder: Path,
    split: str = "validation",
    transform: Callable = None,
):
    """
    Object-detection dataset over an Open-Images-style directory layout.

    Collects every image of the requested split that has at least one
    bounding-box annotation, and builds label-name lookup tables.

    :param root_folder: dataset root containing ``images/`` and ``annotations/``
    :param split: one of ``train`` / ``validation`` / ``test``
    :param transform: optional callable applied to samples by the loader
    """
    super().__init__()
    self.transform = transform

    images_root = root_folder / "images"
    if split == "train":
        # Training images are sharded into train_00 … train_09 sub-folders.
        shard_folders = images_root.glob(f"{split}_0[0-9]")
        candidate_images = chain.from_iterable(
            shard.glob(r"*.jpg") for shard in shard_folders
        )
    else:
        candidate_images = (images_root / split).glob(r"*.jpg")

    bbox_csv_filepath = root_folder.joinpath(
        "annotations", "boxes", f"{split}-annotations-bbox.csv"
    )
    column_indices = tuple(
        BBOX_INDICES[key]
        for key in ("LabelName", "XMin", "XMax", "YMin", "YMax")
    )
    # image id -> list of (label, box) rows (one image maps to n boxes).
    self.box_labels = multicolumn_csv_to_dict(
        bbox_csv_filepath, value_cols=column_indices, one_to_n_mapping=True
    )

    # Keep only images that actually have box annotations.
    annotated_stems = set(self.box_labels.keys())
    self.images = [
        image_path
        for image_path in candidate_images
        if image_path.stem in annotated_stems
    ]

    self.label_name_to_class_description = csv_to_dict(
        root_folder.joinpath(
            "annotations", "metadata", "class-descriptions-boxable.csv"
        ),
        discard_header=False,
    )
    # Bidirectional mapping: label name <-> dense integer class id.
    self.label_name_to_id = bidict(
        (name, index)
        for index, name in enumerate(self.label_name_to_class_description)
    )
def output_to_text(user_settings, output_settings, m_dir):
    """
    Inspect the first ``*.mat`` file of a results directory.

    Loads the user/output settings CSVs, asserts the output settings name a
    single output file, then prints the variable names and loaded type of the
    first ``.mat`` file found in *m_dir*.

    :param user_settings: path to CSV of user settings, e.g. config/settings/default.csv
    :param output_settings: path to CSV of output settings, e.g. config/settings/output_settings.csv
    :param m_dir: path to the directory containing ``*.mat`` files
    :raises AssertionError: if 'singleFilename' is missing from the output settings
    :raises IndexError: if *m_dir* contains no ``.mat`` file
    """
    import os

    user = csv_to_dict(user_settings)
    ott = csv_to_dict(output_settings)
    # BUG FIX: dict.has_key() was removed in Python 3; `in` works in both.
    assert 'singleFilename' in ott
    # outfile = ott['singleFilename']
    outfile = "out.txt"

    matfiles = [
        os.path.join(m_dir, f) for f in os.listdir(m_dir) if f.endswith(".mat")
    ]
    test = matfiles[0]
    fields = [x[0] for x in sio.whosmat(test)]
    # BUG FIX: `print x` statements are Python 2 only; use the function form.
    print(fields)
    data = sio.loadmat(test)
    print(type(data))
    delim = ","  # NOTE(review): unused, as are `user`/`outfile` — function looks unfinished
def points(author_data, conference, journal):
    """
    Score an author's publications: conference papers count once, journal
    papers twice, per the qualis mapping tables.

    :param author_data: path to a CSV mapping venue key -> publication count
    :param conference: dict mapping conference venue key -> qualis stratum
    :param journal: dict mapping journal venue key -> qualis stratum
    :return: result of ``calc_pub`` over the accumulated per-stratum counts
    """
    author_pubs = csv_to_dict(author_data)
    author = create_author_dict()
    for venue, count in author_pubs.items():
        if venue in conference:
            author[conference[venue]] += int(count)
        elif venue in journal:
            # Journal publications are worth double.
            author[journal[venue]] += int(count) * 2
        else:
            # BUG FIX: the message lacked the f-prefix so the placeholder was
            # printed literally, and it referenced the CSV *path* instead of
            # the unmatched venue key.
            print(f"This {venue} does not exist!",
                  "\nPlease correct the input file")
    result = calc_pub(author)
    return result
def init_tables(self):
    """Load every lookup table from its CSV file under ``csv/``."""
    table_names = ("books", "characters", "places", "verbs", "parts", "adjectives")
    for table in table_names:
        setattr(self, table, csv_to_dict(f"csv/{table}.csv"))
""" Proof of Concept Read ports.csv and print out extra commands to run based on open port. """ from utils import csv_to_dict from commands_by_port import commands nmap_dict = csv_to_dict("/root/pentests/home/ports.csv") for row in nmap_dict: if row['port'] in commands: print("# " + commands[row['port']]['header']) print("# ***************************") for command in commands[row['port']]['commands']: print(command.format(ip=row['ipv4']))
# -*- coding: utf-8 -*-
__author__ = 'jorgesaldivar'

import parser_rsssf, utils

if __name__ == '__main__':
    dict_championships = []
    meta_championships = utils.csv_to_dict('../data/campeonatos.csv')
    for meta_championship in meta_championships:
        if int(meta_championship['year']) <= 2007:
            # BUG FIX: the %s template was passed as a second print argument
            # and never interpolated; format it explicitly.
            print('Championship %s' % meta_championship['name'])
            dict_championships.append(parser_rsssf.get_data(meta_championship))
        else:
            print('here')  # NOTE(review): debug leftover — consider removing
    print('Finished!')
ET.SubElement(bndbox, "ymin").text = str(int(float(label_tokens[3]))) ET.SubElement(bndbox, "xmax").text = str(int(float(label_tokens[4]))) ET.SubElement(bndbox, "ymax").text = str(int(float(label_tokens[5]))) if contains: raw_string = ET.tostring(root, "utf-8") reparsed = xml.dom.minidom.parseString(raw_string) file = open(os.path.join(save_path, jpg + ".xml"), "w") file.write(reparsed.toprettyxml(indent="\t")) file.close() if __name__ == "__main__": # result_dir = "/home/sakulaki/code/yolo-pre-trained/darknet/results" # classes_list = ["ASCUS", "LSIL", "ASCH", "HSIL", "SCC"] # dict_pic_info = get_predictions_result(result_dir, classes_list) # img_size = 608 # save_path = "/home/sakulaki/dataset/realtest/608/XB1800118" # det = 0.3 # prediction_convert(dict_pic_info, classes_list, img_size, save_path, det) csv_file = "D:/2018-08-13-test_jpg/2018-08-13-14_15_41/2018-08-13-14_15_41_s.csv" dict_ = csv_to_dict(csv_file) classes = ["ASCUS", "LSIL", "ASCH", "HSIL", "SCC"] img_size = 608 save_path = "D:/2018-08-13-test_jpg/2018-08-13-14_15_41/xmls" det = 0.9 prediction_convert(dict_, classes, img_size, save_path, det)
import sys
from utils import csv_to_dict
from core import run

if __name__ == "__main__":
    # Qualis mapping tables: venue key -> stratum.
    conference = csv_to_dict("../data/conference_qualis.csv")
    journal = csv_to_dict("../data/journal_qualis.csv")
    # All positional arguments are researcher identifiers; removed the unused
    # `path = '.'` local and a stale commented-out CLI-dispatch block that
    # referenced undefined modules.
    if len(sys.argv) > 1:
        persons = sys.argv[1:]
        run(persons, conference, journal)
    else:
        # No researchers supplied — signal failure to the shell.
        sys.exit(1)
def get_stats(self):
    """Return the database's status message parsed into a dictionary."""
    raw_status = self.proto.stat()
    return utils.csv_to_dict(raw_status)