def locations(rack_locations_path=RACKS_LOCATION_CSV):
    """Load rack locations from a CSV file.

    Args:
        rack_locations_path: Path to the CSV file to read (defaults to
            RACKS_LOCATION_CSV).

    Returns:
        A list of RacksLocation objects, one per data row.
    """
    with open(rack_locations_path, 'r') as file:
        csv_file = DictReader(
            file,
            ["latitude", "longitude", "icon", "desc", "racks_count", "parking_places"],
        )
        next(csv_file)  # Skip the header row.
        return [RacksLocation(attributes) for attributes in csv_file]
def __next__(self):
    """Return the next CSV row, validated against the schema when one is set.

    Raises:
        ValidationError: if the schema reports errors for the row.
    """
    row = DictReader.__next__(self)
    if not self.schema:
        return row
    data, errors = self.schema.load(row)
    if errors:
        raise ValidationError(errors, data=data)
    return data
def parse_csv(csv_path: str) -> List[BfoUser]:
    """Parse a CSV file of user records into BfoUser objects.

    Args:
        csv_path: Path to the CSV file; the first line (header) is discarded.

    Returns:
        A list of BfoUser, one per data row.

    Exits the process with status 5 if the file cannot be opened.
    """
    try:
        csv_file = open(csv_path, "r", newline='')
    except OSError:  # Narrowed from Exception: open() raises OSError subclasses.
        print(f"Could not open csv file at path {csv_path}")
        exit(5)
    # 'with' guarantees the handle is closed even if a row is malformed
    # (previously the handle leaked on any exception while reading).
    with csv_file:
        reader = DictReader(csv_file, list(BfoUser.__slots__))
        next(reader)  # Throw out header.
        return [
            BfoUser(row['name'], row['description'], row['underneath'], row['url'])
            for row in reader
        ]
def test_research_study_title(study, transformed_dir):
    """Check that the first specimen row carries the study title in STUDY|NAME."""
    specimens = transformed_dir / 'specimen.tsv'
    assert specimens.exists(), "Specimen file exists?"
    with specimens.open() as f:
        reader = DictReader(f, delimiter='\t')
        # The way the transformation works, all of the specimen in a given
        # file will have the same study name, so checking the first row is enough.
        line = next(reader)  # next() is the idiomatic form of reader.__next__().
        assert line['STUDY|NAME'] == study.title, "Is the title correct?"
def f(path):
    """Compute an ROC curve comparing validation labels with predictions.

    Args:
        path: Directory prefix containing 'validation.csv' (column 'label')
            and 'submission.csv' (column 'prob').

    Returns:
        Tuple (fpr, tpr, roc_auc) as produced by sklearn's roc_curve/auc.
    """
    label_path = path + 'validation.csv'
    predict_path = path + 'submission.csv'
    y_true = []
    y_scores = []
    # Context managers close both files (previously both handles leaked);
    # zip pairs each label row with its prediction row.
    with open(label_path) as label_file, open(predict_path) as predict_file:
        label_reader = DictReader(label_file)
        predict_reader = DictReader(predict_file)
        for row, predict in zip(label_reader, predict_reader):
            y_true.append(float(row['label']))
            y_scores.append(float(predict['prob']))
    # Compute ROC curve and ROC area.
    fpr, tpr, threshold = roc_curve(y_true, y_scores, pos_label=1)
    roc_auc = auc(fpr, tpr)
    return fpr, tpr, roc_auc
def load_authenication(data_path):
    """Loads a Couchbase username and password.

    Args:
        data_path: String that stores the full path to the folder that
            contains the Austin Tx Unified 311 open dataset; the credentials
            live in '.password/password.txt' under it.

    Returns:
        authentication: Dictionary mapping the field names from the file's
            first line to the values on the following line (a Couchbase
            username and password).
    """
    password_file = os.path.join(data_path, '.password', 'password.txt')
    with open(password_file, 'rt') as h_file:
        # First line names the fields; the next line holds the values.
        keys = h_file.readline().rstrip().split(',')
        readerobj = DictReader(h_file, keys)
        return next(readerobj)  # next() is the idiomatic form of __next__().
def test_three_does_not_stomp(self):
    """Migration three must preserve content written after migration two."""
    logging.getLogger().setLevel(logging.CRITICAL)
    self.manager._migrate_one()
    self.manager._migrate_two()

    handle = PathManager.open_input_file('input.csv', 'w+')
    handle.write('foo\nspecial')
    handle.close()

    self.manager._migrate_three()

    backup = PathManager.open_input_file('input.csv.bak', 'r')
    self.assertEqual('foo\nspecial', backup.read())
    backup.close()

    current = PathManager.open_input_file('input.csv', 'r')
    first_row = next(DictReader(current))
    self.assertIn('foo', first_row.keys())
    self.assertEqual(first_row['foo'], 'special')
    current.close()
def read_log(filename):
    """Read a logfile from jiminy.

    This function supports both text (csv) and binary log.

    Parameters:
        - filename: Name of the file to load.

    Returns:
        - A dictionary containing the logged values, and a dictionary
          containing the constants.
    """
    if is_log_binary(filename):
        # Read binary file using C++ parser.
        data_dict, constants_dict = Engine.read_log_binary(filename)
    else:
        # Read text csv file.
        constants_dict = {}
        with open(filename, 'r') as log:
            # First line lists the constants as 'name=value' pairs.
            for constant in next(log).split(', '):
                parts = constant.split('=')
                # Remove line end for last constant.
                constants_dict[parts[0]] = parts[1].strip('\n')
            # Remaining lines are csv data; DictReader consumes the header
            # line itself, so fieldnames gives us every column. Building the
            # columns from fieldnames also avoids the StopIteration that the
            # old explicit first-row read raised on a log with no data rows.
            reader = DictReader(log)
            data = {key: [] for key in reader.fieldnames}
            for row in reader:
                for key, value in row.items():
                    data[key].append(value)
        # Convert every column to a float array once (previously converted
        # twice), providing the same API as the C++ parser and removing
        # spaces present before the keys.
        data_dict = {k.strip(): np.array(v, dtype=np.float64)
                     for k, v in data.items()}
    return data_dict, constants_dict
def __next__(self):
    """Return the next row with each configured column cast to its type."""
    # For Python 3.
    row = DictReader.__next__(self)
    for column in self._casts:
        row[column] = self._casts[column](row[column])
    return row
data_path = sys.argv[1]
result_path = sys.argv[2]
label_path = data_path + 'validation.csv'
predict_path = result_path + 'submission.csv'

count = 0
y_true = []
y_pred = []
y_scores = []
# Context managers close both files (previously both handles leaked);
# zip pairs each label row with its prediction row.
with open(label_path) as label_file, open(predict_path) as predict_file:
    label_reader = DictReader(label_file)
    predict_reader = DictReader(predict_file)
    for row, predict in zip(label_reader, predict_reader):
        actual = float(row['Label'])
        predicted = float(predict['Predicted'])
        y_true.append(actual)
        y_scores.append(predicted)
        # Scores at or above the threshold count as a click.
        y_pred.append(1 if predicted >= 0.5 else 0)
        count += 1

# Compute the performance metric.
auc = roc_auc_score(y_true, y_scores)
threshold = 0.5
label_path = path + 'validation.csv'
predict_path = path + 'submission.csv'

count = 0
y_true = []
y_pred = []
y_scores = []
# Context managers close both files (previously both handles leaked);
# zip pairs each label row with its prediction row.
with open(label_path) as label_file, open(predict_path) as predict_file:
    label_reader = DictReader(label_file)
    predict_reader = DictReader(predict_file)
    for row, predict in zip(label_reader, predict_reader):
        actual = float(row['label'])
        predicted = float(predict['prob'])
        y_true.append(actual)
        y_scores.append(predicted)
        # Scores at or above the threshold count as a click.
        y_pred.append(1 if predicted >= threshold else 0)
        count += 1

# Compute the performance metrics.
auc = roc_auc_score(y_true, y_scores)
logloss = log_loss(y_true, y_scores)