Example #1
def load_data_sub(_file):
    x_data = []
    x_label = []
    # csv.read_csv is assumed to be a project-specific helper
    # (the standard-library csv module has no read_csv function)
    csv_data = csv.read_csv(_file)
    # skip the header row
    csv_to_use = csv_data[1:]
    for idx, parts in enumerate(csv_to_use):
        # limit the subset to the first 1000 rows
        if idx == 1000:
            break
        # process_line turns one CSV row into an image and its label vector
        image, label_vec = process_line(parts)
        x_data.append(image)
        x_label.append(label_vec)
    return x_data, x_label
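
A minimal call-site sketch for the example above, run with load_data_sub, the CSV helper and process_line in scope; the file name "train.csv" and the NumPy conversion are illustrative assumptions, not part of the original example:

import numpy as np

# Hypothetical usage: load up to 1000 samples and stack them into arrays.
x_data, x_label = load_data_sub("train.csv")  # "train.csv" is a placeholder path
x_train = np.asarray(x_data)
y_train = np.asarray(x_label)
print(x_train.shape, y_train.shape)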
Example #2
def create_zones_from_conf(conf_path):
    # read_csv is presumably the same CSV helper as in the other examples;
    # Zone and np (NumPy) are assumed to be defined in the surrounding module
    zone_conf = read_csv(conf_path)
    zones = []
    for line in zone_conf:
        # column 0 is the zone name; the remaining columns are alternating x, y coordinates
        coords = line[1:]

        pts = []
        for i in range(0, len(coords), 2):
            x, y = coords[i], coords[i + 1]
            pts.append([float(x), float(y)])
        zones.append(Zone(line[0], np.array(pts)))
    return zones
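
A short usage sketch, assuming each config row looks like name,x1,y1,x2,y2,... and that Zone simply wraps a name and a polygon of points; the stand-in Zone class and the file name below are hypothetical and only meant to show the call:

import numpy as np

class Zone:
    # Illustrative stand-in for the project's Zone class: a named polygon of points.
    def __init__(self, name, polygon):
        self.name = name
        self.polygon = polygon  # np.ndarray of shape (n_points, 2)

zones = create_zones_from_conf("zones.csv")  # "zones.csv" is a placeholder path
for zone in zones:
    print(zone.name, zone.polygon.shape)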
Example #3
def generate_batch_size(path: str, batch_size: int):
    # csv.read_csv is assumed to be the project's CSV helper (not the standard-library csv module)
    csv_data = csv.read_csv(path)
    # skip the header row
    csv_to_use = csv_data[1:]
    features = []
    target = []
    # infinite generator: keeps cycling over the file, yielding one processed batch at a time
    while True:
        for idx, parts in enumerate(csv_to_use):
            image, label_vec = process_line(parts)
            features.append(image)
            target.append(label_vec)
            if (idx + 1) % batch_size == 0:
                yield process_data(features, target)
                features = []
                target = []
        # flush the incomplete final batch, if any, before starting the next pass
        # (the guard avoids yielding an empty batch when the row count is a multiple of batch_size)
        if features:
            yield process_data(features, target)
            features = []
            target = []
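
A minimal consumption sketch, assuming process_data returns an (x, y) pair of batch arrays; the file name and batch size are placeholders:

# Hypothetical usage: pull a couple of batches from the endless generator.
batch_gen = generate_batch_size("train.csv", batch_size=32)
x_batch, y_batch = next(batch_gen)    # first 32 processed rows
x_batch2, y_batch2 = next(batch_gen)  # next 32 rows, and so on indefinitely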
Example #4
class Statistics:
    # Container for summary statistics of a distribution: minimum, selected percentiles and maximum.
    def __init__(self, min: float, p01: float, p10: float, median: float, p90: float, p99: float, max: float):
        self.min = min
        self.p01 = p01
        self.p10 = p10
        self.median = median
        self.p90 = p90
        self.p99 = p99
        self.max = max

    def __repr__(self):
        return f'(min: {self.min}, p01: {self.p01}, p10: {self.p10}, median: {self.median}, p90: {self.p90}, p99: {self.p99}, max: {self.max})'


if __name__ == '__main__':
    # read_csv is assumed to be the project's CSV helper; `_` is assumed to be a small
    # functional helper module providing index_by / partition / group_by / percentile_by
    meta = read_csv(metadata_filename, has_header=True, transform=metadata.from_line)
    light_curves = read_csv(light_curves_filename, has_header=True, transform=record.from_line)

    # map object_id -> metadata record, so each light-curve point can be matched to its class
    target_of = _.index_by(meta, lambda it: it.object_id)

    # drop measurements whose flux_err lies above the 99.9th percentile (outliers)
    p99_9_flux_err = _.percentile_by(light_curves, 99.9, lambda it: it.flux_err).flux_err
    good_points, bad_points = _.partition(light_curves, lambda it: it.flux_err <= p99_9_flux_err)

    # group the remaining points by the target class of the object they belong to
    light_curves_by_class = _.group_by(good_points, lambda it: target_of[it.object_id].target)

    statistics = {}
    for target, target_records in light_curves_by_class.items():
        light_curves_by_passband = _.group_by(target_records, lambda it: it.passband)

        # one slot per passband (passband indices 0-5 are assumed)
        statistics[target] = [None for _ in range(6)]
        for passband, records in light_curves_by_passband.items():