Example #1
import numpy as np
from numpy.random import default_rng
from numpy.typing import NDArray
from typing import Optional, Tuple


def select_subset(input: NDArray,
                  *,
                  num: Optional[int] = None,
                  ratio: Optional[float] = None) -> Tuple[NDArray, NDArray]:
    if (num is None and ratio is None) or (num is not None
                                           and ratio is not None):
        raise ValueError('Exactly one of num or ratio must be given.')

    if ratio is not None:
        num = int(input.shape[0] * ratio)

    input_size = input.shape[0]
    sub_set_size = num
    remaining_set_size = input_size - sub_set_size

    input_indx = np.arange(input_size)

    rng = default_rng()

    sub_set_indx = np.sort(
        rng.choice(input_indx, size=sub_set_size, replace=False))
    assert len(sub_set_indx) == sub_set_size

    remaining_set_indx = np.delete(input_indx, sub_set_indx)
    assert len(remaining_set_indx) == remaining_set_size

    sub_set = input[sub_set_indx]
    remaining_set = input[remaining_set_indx]

    return sub_set, remaining_set
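
A minimal usage sketch (not part of the original example; it assumes numpy is imported as np and select_subset is in scope): splitting a 100-row array with ratio=0.2 returns a 20-row subset and the remaining 80 rows, drawn from disjoint row indices.

data = np.arange(300).reshape(100, 3)
subset, rest = select_subset(data, ratio=0.2)
# subset.shape == (20, 3), rest.shape == (80, 3); the two row-index sets are disjoint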
Example #2
    def __init__(self, name_file_config):
        self.config = ParseConfig("File/" + name_file_config)
        self.pop_size = self.config.get_population_size()
        self.seed_num = self.config.get_random_seed()
        self.ngen = self.config.get_ngen()
        self.crossover_prob = self.config.get_crossover_prob()
        self.mutation_prob = self.config.get_mutation_prob()
        self.mate = self.config.get_crossover()
        self.mutate = self.config.get_mutation()
        self.Fitness = Fitness(self.config)

        self.player = Player()
        self.pset = gp.PrimitiveSet("MAIN", 0)

        # Register the player's control-flow primitives and movement terminals
        self.pset.addPrimitive(self.player.if_box_ahead, 2)
        self.pset.addPrimitive(self.player.prog2, 2)
        self.pset.addPrimitive(self.player.prog3, 3)
        self.pset.addPrimitive(self.player.prog4, 4)
        self.pset.addTerminal(self.player.move_left)
        self.pset.addTerminal(self.player.move_right)
        self.pset.addTerminal(self.player.move_down)
        self.pset.addTerminal(self.player.move_up)

        creator.create("fitness", base.Fitness, weights=(-1.0, ))
        creator.create("Individual", gp.PrimitiveTree, fitness=creator.fitness)

        self.toolbox = base.Toolbox()

        # executor = ThreadPoolExecutor()
        # self.toolbox.register("map", executor.map)

        # Attribute generator
        self.toolbox.register("expr_init",
                              gp.genFull,
                              pset=self.pset,
                              min_=0,
                              max_=1)

        # Structure initializers
        self.toolbox.register("individual", tools.initIterate,
                              creator.Individual, self.toolbox.expr_init)
        self.toolbox.register("population", tools.initRepeat, list,
                              self.toolbox.individual)

        rng = default_rng(int(self.seed_num))
        all_levels = range(0, 20)
        self.train_set = rng.choice(20, size=14, replace=False)
        self.test_set = [
            item for item in all_levels if item not in self.train_set
        ]

        self.toolbox.register("evaluate", self.evalPlayer)
        # todo change select function
        self.toolbox.register("select", tools.selTournament, tournsize=7)
        self.toolbox.register("mate", self.mate)
        self.toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
        self.toolbox.register("mutate",
                              self.mutate,
                              expr=self.toolbox.expr_mut,
                              pset=self.pset)
Example #3
 def __init__(
     self,
     pen_table: Union[np.ndarray,
                      PenetranceTables] = np.array([[0.0, 0.0, 1.0],
                                                    [0.0, 0.0, 1.0],
                                                    [1.0, 1.0, 2.0]]),
     penetrance_base: float = 0.25,
     penetrance_diff: Optional[float] = None,
     snp1: Optional[Variant] = None,
     snp2: Optional[Variant] = None,
     random_seed: int = 1855,
 ):
     """
     Parameters
     ----------
     pen_table: 3x3 np array or PenetranceTables enum
         Penetrance values.  Will be scaled between 0 and 1 if needed.
     penetrance_base: float, default 0.25
         Baseline to use in the final penetrance table, must be in [0,1]
     penetrance_diff: optional float, default None (use 1-2*penetrance_base)
          Difference between the minimum and maximum probabilities in the penetrance table.
         penetrance_base + penetrance_diff must be in [0,1]
     snp1: Optional[Variant]
     snp2: Optional[Variant]
     random_seed: int, default 1855
     """
     pen_table, snp1, snp2 = self._validate_params(pen_table,
                                                   penetrance_base,
                                                   penetrance_diff, snp1,
                                                   snp2)
     self.pen_table = pen_table
     self.snp1 = snp1
     self.snp2 = snp2
     self._random_seed = random_seed
     self.rng = default_rng(self._random_seed)
Example #4
def main():
    _, _, test_dataset = generate_subsets()

    # Load the trained model (only the best checkpoint is saved)
    best_model = load_model("./cnn_model.hdf5")

    predicted_probabilities = best_model.predict_generator(
        generator=test_dataset, verbose=1)

    # flatten the thresholded probabilities into a 1-D array of boolean labels
    predicted_labels = (predicted_probabilities >=
                        PREDICTION_THRESHOLD).reshape(-1, )
    true_labels = test_dataset.classes[test_dataset.index_array]

    print(
        pd.DataFrame(
            confusion_matrix(true_labels, predicted_labels),
            index=[["Actual", "Actual"], ["ok", "defect"]],
            columns=[["Predicted", "Predicted"], ["ok", "defect"]],
        ))

    print(classification_report(true_labels, predicted_labels, digits=4))

    test_indexes = test_dataset.index_array
    rng = default_rng()
    random_indexes = rng.choice(len(test_indexes), size=16, replace=False)
    plot_results("random", test_dataset, random_indexes, true_labels,
                 predicted_probabilities)

    misclassified_indexes = np.nonzero(predicted_labels != true_labels)[0]
    plot_results("missed", test_dataset, misclassified_indexes, true_labels,
                 predicted_probabilities)
Example #5
def generate_random_8puzzle():
    # generate 8-puzzles until a solvable puzzle is created, return the solvable puzzle
    rng = default_rng()
    while True:
        # draw the numbers 0-8 without replacement and reshape into a 3x3 matrix
        state = np.array(
            rng.choice(9, size=9, replace=False).reshape((3, 3)), np.int32)
        if check_solvable(state):
            return state
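
An illustrative call (not part of the original example; check_solvable is assumed to be defined elsewhere in the module): the returned 3x3 grid is a random permutation of 0-8 that passes the solvability check, with 0 conventionally marking the blank tile.

state = generate_random_8puzzle()
print(state)  # a solvable 3x3 arrangement of the numbers 0-8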
Example #6
def generate(mapsize):
    plt.figure(figsize=(mapsize / 100, mapsize / 100))

    rivers = range(0, round(mapsize / 3000))
    bendyness = 1
    if len(rivers) == 1 and np.random.rand() < 0.5:
        rivers = [1]

    for i in rivers:
        rng = default_rng()

        x = np.array(sorted(rng.choice(100, size=10, replace=False)))
        y = np.array(sorted(rng.choice(100, size=10, replace=False)))

        xnew = np.linspace(0, mapsize / 100, mapsize * 10)

        spl = make_interp_spline(x, y, k=1)
        y_smooth = spl(xnew)

        lwidths = []
        for each in xnew:
            if i != 0:
                lwidths.append(np.random.randint(50, 52))
            else:
                lwidths.append(np.random.randint(71, 73))

        # Mirror rivers so they do not all flow the same way: flip y for even i, flip x when i is an odd multiple of 3
        for k in range(len(y_smooth)):
            if i % 2 == 0:
                y_smooth[k] = mapsize / 100 - y_smooth[k]
            elif i % 3 == 0:
                xnew[k] = mapsize / 100 - xnew[k]

        plt.scatter(xnew, y_smooth, s=lwidths, color='k')

    plt.axis('off')
    plt.grid(False)

    ax = plt.gca()
    ax.set_xlim(0.0, mapsize / 100)
    ax.set_ylim(mapsize / 100, 0.0)

    plt.tight_layout()
    plt.savefig('./data/river_img.png',
                pad_inches=0,
                bbox_inches=None,
                metadata=None)
Example #7
def get_stream(name):
    config_path = path.join(path.dirname(__file__), f'{name}.yaml')
    stream_config = load_yaml(config_path)
    config = load_default_config(stream_config)
    recursive_update(config, stream_config)
    return init_component(default_rng(), **config)['task_gen']
 def set_random_seed(self, new_seed: int):
     """
     Reset the random number generator with the specified seed.
     """
     self._random_seed = new_seed
     self.rng = default_rng(self._random_seed)
def main():
    if len(argv) < 6:
        stderr.write(
            f"USAGE: {argv[0]} [path to road network GEOJSON file] [path to simplified building data] "
            f"[path to mappings file] [path to emissions file] [output path]")
        exit(1)

    with open(argv[1], 'r', encoding='utf-8') as file:
        network = RoadNetwork(file)

    buildings = load_buildings(argv[2], z_order=Z_ORDER_BUILDING)

    with open(argv[3], 'r', encoding='utf-8') as file:
        reader = csv.reader(file)
        next(reader)  # skip the CSV header row

        mappings = {}
        for row in reader:
            vehicle = int(row[0])
            bldg = int(row[4])
            mapped_count = int(row[8])

            buildings[bldg]["count"] = mapped_count
            mappings[vehicle] = {
                "building": bldg,
                "x": float(row[2]),
                "y": float(row[3]),
                "distance": float(row[7])
            }

    # Collect the positive per-building mapped-vehicle counts used for color scaling
    counts = np.fromiter(
        filter(lambda x: x > 0, map(lambda b: b["count"], buildings.values())),
        float)
    if LOG_SCALING:
        counts = np.log(counts)

    cq1, cq3, cf1, cf2 = fences(counts)
    bldg_norm = Normalize(0, cf2, clip=True)

    # Set up figure and axes:
    fig = plt.figure()
    ax: Axes = fig.add_subplot()

    plot_roads(ax,
               network,
               color_roads=False,
               plot_nodes=False,
               z_order=Z_ORDER_ROAD,
               alpha=0.25,
               default_road_color='#FFFFFF')

    for bldg in buildings.values():
        edge_alpha = 0.75
        alpha = 0.5

        if bldg["count"] <= 0:
            bldg["color"] = (0, 0, 0, 0)
            edge_alpha = 0.5
        else:
            if LOG_SCALING:
                count = np.log(bldg["count"])
            else:
                count = bldg["count"]

            bldg["color"] = HEAT_CM_1(bldg_norm(count))

        patch = bldg["poly"]
        patch.set_edgecolor((0, 0, 0, edge_alpha))
        patch.set_facecolor(bldg["color"])
        patch.set_fill(True)
        ax.add_patch(patch)

    # Draw a reproducible random sample of at most 2500 mapped vehicle IDs
    vids = np.fromiter(mappings.keys(), int, len(mappings))
    n = min(len(mappings), 2500)
    rng = default_rng(SEED)

    sample = rng.choice(vids, size=n, replace=False)

    with open(argv[4], 'r', encoding='utf-8') as file:
        emissions = EmissionsSnapshot.load(file)
    hmap, link_cells, max_value = comp_all(network, emissions)
    em_norm = Normalize(0, max_value, clip=True)
    ax.imshow(hmap,
              cmap=HEAT_CM_2_ALPHA,
              zorder=Z_ORDER_HEAT,
              extent=(X_MIN, X_MAX, Y_MIN, Y_MAX))

    ax.set_xlim(X_MIN, X_MAX)
    ax.set_ylim(Y_MIN, Y_MAX)

    ax.set_xticks([])
    ax.set_yticks([])
    fig.colorbar(cm.ScalarMappable(norm=bldg_norm, cmap=HEAT_CM_1),
                 label="Mapped Vehicle Count")
    fig.colorbar(cm.ScalarMappable(norm=em_norm, cmap=HEAT_CM_2),
                 label="Emissions Quantity (MMBtu)")
    plt.savefig(argv[5])
    plt.show()