def _apply_array(self, arrays: tp.Sequence[np.ndarray], rng: np.random.RandomState) -> np.ndarray:
    # checks
    arrays = list(arrays)
    if len(arrays) != 2:
        raise Exception("Crossover can only be applied between 2 individuals")
    shape = arrays[0].shape
    assert shape == arrays[1].shape, "Individuals should have the same shape"
    # settings
    axis = tuple(range(len(shape))) if self.axis is None else self.axis
    max_size = int(((arrays[0].size + 1) / 2) ** (1 / len(axis))) if self.max_size is None else self.max_size
    max_size = min(max_size, *(shape[a] - 1 for a in axis))
    size = 1 if max_size == 1 else rng.randint(1, max_size)
    # slices
    slices = []
    for a, s in enumerate(shape):
        if a in axis:
            if s <= 1:
                raise ValueError("Cannot crossover a shape with size 1")
            start = rng.randint(s - size)
            slices.append(slice(start, start + size))
        else:
            slices.append(slice(0, s))
    result = np.array(arrays[0], copy=True)
    result[tuple(slices)] = arrays[1][tuple(slices)]
    return result
def create_offspring_pairs(species: Species, amount_offspring: int,
                           agent_id_generator: AgentIDGeneratorInterface,
                           generation: Generation, rnd: np.random.RandomState,
                           config: NeatConfig) -> List[Tuple[int, int, int]]:
    """
    Create tuples with the ids of agents that should be used in the crossover.
    :param species: the species for which the crossover pairs should be generated
    :param amount_offspring: the amount of offspring for the given species
    :param agent_id_generator: the id generator for the agents
    :param generation: the generation with all its members
    :param rnd: the random generator used to select the parents
    :param config: the neat config
    :return: a list of tuples; each tuple contains the id of the first parent,
        the id of the second parent and the child id
    """
    assert len(species.members) != 0
    result_list = []
    species_len = len(species.members)
    for i in range(amount_offspring):
        # TODO add probability only mutation
        # TODO add reproduction with different species
        first_parent_index = rnd.randint(species_len)
        second_parent_index = rnd.randint(species_len)
        new_agent_id = agent_id_generator.get_agent_id()

        first_parent_id = species.members[first_parent_index].id
        second_parent_id = species.members[second_parent_index].id
        result_list.append((first_parent_id, second_parent_id, new_agent_id))

    return result_list
def GenerateDeadcodeMutations(
    kernels: typing.Iterator[str],
    rand: np.random.RandomState,
    num_permutations_of_kernel: int = 5,
    num_mutations_per_kernel: typing.Tuple[int, int] = (1, 5),
) -> typing.Iterator[str]:
  """Generate dead code mutations for a set of kernels.

  Args:
    kernels: The OpenCL kernels to mutate.
    rand: A random number generator.
    num_permutations_of_kernel: The number of permutations of each kernel to
      generate.
    num_mutations_per_kernel: The minimum and maximum number of mutations to
      apply to each generated kernel.
  """
  # Materialize the kernels so they can be iterated over and also passed as
  # mutation candidates without consuming the same iterator twice.
  kernels = list(kernels)
  for kernel in kernels:
    for _ in range(num_permutations_of_kernel):
      # Apply random mutations to kernel and yield.
      rand_ = np.random.RandomState(rand.randint(0, int(1e9)))
      # Use all kernels (including the current one we're mutating) as
      # candidates for mutation.
      dci = OpenClDeadcodeInserter(rand_, kernel, candidate_kernels=kernels)
      # RandomState.randint() is in range [low,high), hence add one to max to
      # make it inclusive.
      num_mutations = rand.randint(num_mutations_per_kernel[0],
                                   num_mutations_per_kernel[1] + 1)
      for _ in range(num_mutations):
        dci.Mutate()
      yield dci.opencl_source
def init_board(size: int, state: numpy.random.RandomState) -> numpy.ndarray:
    """
    Init board
    :param size: size of the board
    :param state: numpy random state
    :return: the initialized board, with a single 2 placed at a random cell
    """
    board = numpy.zeros((size, size), dtype=int)
    board[state.randint(0, size), state.randint(0, size)] = 2
    return board
def salt_and_pepper(image, r: np.random.RandomState, s_vs_p=0.5, amount=0.004):
    # Salt sets pixels to all the way on for a channel.
    # randint's upper bound is exclusive, so pass i (not i - 1) so that the
    # last index along each dimension is reachable.
    num_salt = np.ceil(amount * image.size * s_vs_p)
    coords = [r.randint(0, i, int(num_salt)) for i in image.shape]
    image[tuple(coords)] = 1

    # Pepper sets pixels to all the way off for a channel
    num_pepper = np.ceil(amount * image.size * (1.0 - s_vs_p))
    coords = [r.randint(0, i, int(num_pepper)) for i in image.shape]
    image[tuple(coords)] = 0
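# Usage sketch (illustration, not from the original source): salt_and_pepper
# mutates its input in place, so pass a writable float array.
rng = np.random.RandomState(0)
img = rng.rand(64, 64, 3)
salt_and_pepper(img, rng)  # a handful of pixels are now exactly 0.0 or 1.0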
def get_indices(length: int, full_length: int, method: Union[str, Method],
                random_state: Optional[np.random.RandomState] = None):
    """
    Get `start` and `stop` indices for a given slice method.

    Examples:
        >>> get_indices(3, 3, 'exact')
        (0, 3)
        >>> get_indices(3, 5, 'start')
        (0, 3)
        >>> get_indices(3, 5, 'stop')
        (2, 5)
        >>> get_indices(20, 30, 'middle', np.random.RandomState(42))
        (5, 25)
        >>> get_indices(20, 30, 'edges', np.random.RandomState(42))
        (8, 18)
    """
    if isinstance(method, str):
        method = Method(method)
    assert method in Method

    if method in ["middle", "edges"] and random_state is None:
        random_state = np.random.RandomState()

    gap = full_length - length
    if method == Method.EXACT:
        assert length == full_length
        start = 0
        stop = full_length
    elif method == Method.START:
        start = 0
        stop = length
    elif method == Method.STOP:
        start = gap
        stop = start + length
    elif method == Method.MIDDLE:
        assert length >= 20
        start = random_state.randint(min(10, gap // 2), max(gap - 10, gap // 2) + 1)
        stop = start + length
    elif method == Method.EDGES:
        assert length >= 20
        start = random_state.randint(min(10, gap // 2), min(length - 10, gap) + 1)
        stop = full_length - (length - start)

    assert start >= 0, (length, full_length, method, start, stop)
    assert stop <= full_length, (length, full_length, method, start, stop)
    assert start <= stop, (length, full_length, method, start, stop)
    return start, stop
def get_neighbors(self, value: int, rs: np.random.RandomState,
                  number: Union[int, float] = np.inf,
                  transform: bool = False) -> List[Union[float, int, str]]:
    neighbors = []  # type: List[Union[float, int, str]]
    if number < len(self.choices):
        while len(neighbors) < number:
            rejected = True
            index = int(value)
            while rejected:
                neighbor_idx = rs.randint(0, self._num_choices)
                if neighbor_idx != index:
                    rejected = False

            if transform:
                candidate = self._transform(neighbor_idx)
            else:
                candidate = float(neighbor_idx)

            if candidate in neighbors:
                continue
            else:
                neighbors.append(candidate)
    else:
        for candidate_idx, candidate_value in enumerate(self.choices):
            if int(value) == candidate_idx:
                continue
            else:
                if transform:
                    candidate = self._transform(candidate_idx)
                else:
                    candidate = float(candidate_idx)
                neighbors.append(candidate)

    return neighbors
def _sample(self, rs: np.random.RandomState, size: Union[int, None] = None) -> int:
    """
    Returns a random sample from our sequence as order/position index.
    """
    return rs.randint(0, self._num_elements, size=size)
def play_move(board: numpy.ndarray, side: int, state: numpy.random.RandomState) -> int:
    """
    Play a move
    :param board: board
    :param side: side (0, 1, 2, 3) (down, right, up, left)
    :param state: numpy random state
    :return: score
    """
    score = 0
    size = board.shape[0]
    direction = side % 2  # 0 down, 1 right
    sens = (direction == 0) * (side - 1) + (direction == 1) * (side - 2)  # -1 down/right, 1 up/left
    for i in (range(size - 1, -1, -1) if sens == -1 else range(size)):
        for j in range(size):
            a = (i, j) if direction == 0 else (j, i)
            b = (i + sens, j) if direction == 0 else (j, i + sens)
            while (0 <= b[0] < size > b[1] >= 0 <= a[0] < size > a[1] >= 0
                   and (board[a] == board[b] or board[a] == 0)):
                board[a] += board[b]
                score += board[b]
                board[b] = 0
                a = (a[0] - sens, a[1]) if direction == 0 else (a[0], a[1] - sens)
                b = (b[0] - sens, b[1]) if direction == 0 else (b[0], b[1] - sens)
    if score != 0:
        x = state.randint(0, size ** 2)
        while board[x // size, x % size] != 0:
            x = (x + 1) % size ** 2
        board[x // size, x % size] = 2 + 2 * (state.rand() > 0.8)
    return score
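# Usage sketch (illustration, not from the original source): play a few random
# moves of the 2048-style game defined by init_board and play_move above.
state = numpy.random.RandomState(42)
board = init_board(4, state)
total_score = sum(play_move(board, state.randint(0, 4), state) for _ in range(10))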
def random_date_img_tuple(
    dataset: Dict[str, torch.Tensor],
    writer: Union[int, None] = None,
    rand: np.random.RandomState = np.random.RandomState(seed=1234),
    **kwargs
) -> Tuple[Tuple[torch.Tensor, int], Tuple[torch.Tensor, int],
           Tuple[torch.Tensor, int]]:
    """Compose a tuple of images of a valid date written by one person."""
    # choose a consistent writer
    if writer is None:
        max_writer = min([d.shape[0] for d in dataset.values()]) - 1
        writer = rand.randint(low=0, high=max_writer, size=1).item()
    # choose whether the writer prepends zeros to day and month
    leading_zero = rand.randn(1) > 0.6
    return (random_day_img(dataset=dataset, writer=writer, rand=rand,
                           leading_zero=leading_zero, **kwargs),
            random_month_img(dataset=dataset, writer=writer, rand=rand,
                             leading_zero=leading_zero, **kwargs),
            random_year_img(dataset=dataset, writer=writer, rand=rand, **kwargs))
def _pad_edges_shorter(
    ds: DataSetGAN,
    length: int,
    target_length: int,
    random_state: np.random.RandomState,
    offset: Optional[int] = None,
):
    if offset is None:
        start = random_state.randint(0, target_length - length + 1)
    else:
        start = offset
    pad_end = max(0, target_length - length - start)

    new_seqs = []
    for seq in ds.seqs:
        new_seq = b"." * start + seq + b"." * pad_end
        new_seqs.append(new_seq)

    new_adjs = []
    for adj in ds.adjs:
        row = adj.row + start
        col = adj.col + start
        new_adj = sparse.coo_matrix((adj.data, (row, col)), dtype=adj.dtype,
                                    shape=(target_length, target_length))
        new_adjs.append(new_adj)

    return new_seqs, new_adjs
def sample_idx(self, rs: np.random.RandomState, size: int):
    upper_bound = len(self) - size
    if upper_bound <= 0:
        raise ValueError(
            f'Network (size:{size}) is too large for noise table (size:{len(self)})'
        )
    return rs.randint(0, upper_bound)
def _select_train_indices(
    self,
    n_samples: int,
    random_state: np.random.RandomState,
    y: Union[np.ndarray, pd.Series, pd.DataFrame, None],
) -> np.ndarray:
    return random_state.randint(n_samples, size=n_samples)
def _select_train_indices(
    self,
    n_samples: int,
    random_state: np.random.RandomState,
    y: Union[np.ndarray, pd.Series, pd.DataFrame, None],
) -> np.ndarray:
    mean_block_size = self.mean_block_size
    if mean_block_size < 1:
        # if mean block size was set as a percentage, calculate the actual
        # mean block size
        mean_block_size = n_samples * mean_block_size

    p_new_block = 1.0 / mean_block_size

    train = np.empty(n_samples, dtype=np.int64)

    for i in range(n_samples):
        if i == 0 or random_state.uniform() <= p_new_block:
            idx = random_state.randint(n_samples)
        else:
            # noinspection PyUnboundLocalVariable
            idx += 1
            if idx >= n_samples:
                idx = 0

        train[i] = idx

    return train
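# Usage sketch (illustration, not from the original source): a minimal stand-in
# carrying the one attribute the stationary-bootstrap method above reads, so it
# can be exercised outside its resampler class.
class _StationaryBootstrapStub:
    mean_block_size = 5
    _select_train_indices = _select_train_indices

indices = _StationaryBootstrapStub()._select_train_indices(
    20, np.random.RandomState(0), y=None)
# `indices` contains runs of consecutive sample positions whose expected
# length is mean_block_size.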
def _read_random_row_group(
    parquet_file: Path,
    columns: Union[dict, tuple],
    filters: List[Callable],
    random_state: np.random.RandomState,
) -> pd.DataFrame:
    """Read a random row group from the parquet file at `parquet_file`.

    TODO: Refactor this ugly function to take fewer arguments.
    """
    column_renames = _get_column_renames(columns)
    parquet_file_obj = pq.ParquetFile(parquet_file)
    row_group_idx = random_state.randint(parquet_file_obj.num_row_groups)
    logger.debug("Reading row group %s from parquet file '%s'.", row_group_idx,
                 parquet_file)
    table = parquet_file_obj.read_row_group(row_group_idx,
                                            columns=list(columns),
                                            use_threads=True)
    df = table.to_pandas(use_threads=True)
    df = df.rename(columns=column_renames)
    for fn in filters:
        df = fn(df)
    df = df.sample(frac=1, random_state=random_state)
    assert not set(DataRow._fields) - set(df.columns)
    return df
def mkimg(r: np.random.RandomState):
    """Make a typical random image

    :param r: A random state for generating images
    :returns: a 10x10 uint8 array
    """
    return r.randint(0, 256, size=(10, 10), dtype=np.uint8)
def split_train_test_userwise_random(
    df_: pd.DataFrame,
    user_colname: str,
    item_colname: str,
    item_ids: List[Any],
    heldout_ratio: float,
    n_heldout: Optional[int],
    rns: np.random.RandomState,
    rating_column: Optional[str] = None,
) -> UserTrainTestInteractionPair:
    """Split the user x item data frame into a pair of sparse matrices
    (represented as a UserDataSet).

    Parameters
    ----------
    df_:
        user x item interaction matrix.
    user_colname:
        The column name for the users.
    item_colname:
        The column name for the items.
    item_ids:
        The item ids to keep; item indices are assigned by position in this list.
    heldout_ratio:
        The fraction of items (per user) to be held out as test (validation) ones.
    n_heldout:
        The maximal number of items (per user) to be held out as test
        (validation) ones.
    rns:
        The random state.
    rating_column:
        The column for the rating values. If None, the rating values will all
        be equal (1), by default None.

    Returns
    -------
    UserDataSet
        Resulting train-test split dataset.
    """
    df_ = df_[df_[item_colname].isin(item_ids)]

    item_indices = pd.Categorical(df_[item_colname], categories=item_ids).codes
    user_ids, user_indices = np.unique(df_[user_colname], return_inverse=True)
    if rating_column is not None:
        data = df_[rating_column].values
    else:
        data = np.ones(df_.shape[0], dtype=np.int32)

    X_all = sps.csr_matrix(
        (data, (user_indices, item_indices)),
        shape=(len(user_ids), len(item_ids)),
    )
    X_learn, X_predict = rowwise_train_test_split(
        X_all,
        heldout_ratio,
        n_heldout,
        random_seed=rns.randint(-(2**31), 2**31 - 1),
    )

    return UserTrainTestInteractionPair(user_ids, X_learn.tocsr(),
                                        X_predict.tocsr(), item_ids)
def value_to_img(
    value: str,
    dataset: Dict[str, torch.Tensor],
    writer: Union[int, None] = None,
    spacing: float = 0.5,
    bkg_value: int = 255,
    fg_value: int = 0,
    check_input: bool = True,
    rand: np.random.RandomState = np.random.RandomState(seed=1234)
) -> torch.Tensor:
    """Convert a numerical value to an image of that value.

    Args:
        value: The integer that should be converted to an image. This is input
            as a string so that you can choose to have a leading zero or not,
            but it must be a string of numbers.
        dataset: A pre-sorted lookup of a given dataset, where dataset[key] is
            a tensor containing all instances of that class of image specified
            by the key label.
        writer: The MNIST writer whose handwriting we will use. An integer
            value from 0 to 5421. `None` will pick at random.
        spacing: Value that controls the blank space between digits. Float
            between 0 and 1, where 0 is tight spacing.
        bkg_value: Value for background pixels.
        fg_value: Value for foreground (digit) pixels.
        check_input: False will disable input checking for a possible speed-up,
            but it's probably negligible on a cpu.
        rand: For reproducibility, pass a seeded numpy random number generator
            object.

    Returns:
        img: An image of the hand-written value.
    """
    if check_input:
        max_writer = min([d.shape[0] for d in dataset.values()]) - 1
        if writer is None:
            writer = rand.randint(low=0, high=max_writer, size=1).item()
        else:
            assert (writer >= 0) and (writer < max_writer), \
                f'writer must be >= 0 and less than the max, which is {max_writer}'

    # create blank canvas
    width = int(28 * (1 + 0.5 * (len(value) - 1)
                      + 0.5 * spacing * (len(value) - 1))) + 1
    img = torch.zeros((28, width))
    step = int(28 * (0.5 + 0.5 * spacing))
    ranges = zip(range(0, width - 28, step), range(28, width, step))

    # paste value images
    for v, region in zip(value, list(ranges)):
        digit = dataset[v][writer, ...]
        img[:, region[0]:region[1]] = img[:, region[0]:region[1]] + digit

    img = torch.clamp(img, min=0., max=1.)
    img = img * (fg_value - bkg_value) + bkg_value
    return img
def next_lane(
    self,
    current_index: LaneIndex,
    route: Route = None,
    position: np.ndarray = None,
    # Don't change this, since we need to make map identical to old version.
    # get_np_random is used for traffic only.
    np_random: np.random.RandomState = None
) -> LaneIndex:
    """
    Get the index of the next lane that should be followed after finishing the
    current lane.

    - If a plan is available and matches with current lane, follow it.
    - Else, pick next road randomly.
    - If it has the same number of lanes as current road, stay in the same lane.
    - Else, pick next road's closest lane.

    :param current_index: the index of the current lane.
    :param route: the planned route, if any.
    :param position: the vehicle position.
    :param np_random: a source of randomness.
    :return: the index of the next lane to be followed when current lane is
        finished.
    """
    assert np_random
    _from, _to, _id = current_index
    next_to = None
    # Pick next road according to planned route
    if route:
        if route[0][:2] == current_index[:2]:
            # We just finished the first step of the route, drop it.
            route.pop(0)
        if route and route[0][0] == _to:
            # Next road in route is starting at the end of current road.
            _, next_to, _ = route[0]
        elif route:
            logger.warning("Route {} does not start after current road {}.".format(
                route[0], current_index))
    # Randomly pick next road
    if not next_to:
        try:
            next_to = list(self.graph[_to].keys())[np_random.randint(
                len(self.graph[_to]))]
        except KeyError:
            # logger.warning("End of lane reached.")
            return current_index

    # If the next road has the same number of lanes, stay on the same lane
    if len(self.graph[_from][_to]) == len(self.graph[_to][next_to]):
        next_id = _id
    # Else, pick the closest lane
    else:
        lanes = range(len(self.graph[_to][next_to]))
        next_id = min(lanes, key=lambda l: self.get_lane(
            (_to, next_to, l)).distance(position))

    return _to, next_to, next_id
def GenerateGraph(
    rand: np.random.RandomState,
    num_nodes_min_max,
    dimensions: int = 2,
    theta: float = 1000.0,
    rate: float = 1.0,
    weight_name: str = "distance",
) -> nx.Graph:
  """Creates a connected graph.

  The graphs are geographic threshold graphs, but with added edges via a
  minimum spanning tree algorithm, to ensure all nodes are connected.

  Args:
    rand: A random number generator.
    num_nodes_min_max: A sequence [lower, upper) number of nodes per graph.
    dimensions: (optional) An `int` number of dimensions for the positions.
      Default=2.
    theta: (optional) A `float` threshold parameter for the geographic
      threshold graph's threshold. Large values (1000+) make mostly trees. Try
      20-60 for good non-trees. Default=1000.0.
    rate: (optional) A rate parameter for the node weight exponential sampling
      distribution. Default=1.0.
    weight_name: The name for the weight edge attribute.

  Returns:
    The graph.
  """
  # Sample num_nodes.
  num_nodes = rand.randint(*num_nodes_min_max)

  # Create geographic threshold graph.
  pos_array = rand.uniform(size=(num_nodes, dimensions))
  pos = dict(enumerate(pos_array))
  weight = dict(enumerate(rand.exponential(rate, size=num_nodes)))
  geo_graph = nx.geographical_threshold_graph(num_nodes, theta, pos=pos,
                                              weight=weight)

  # Create minimum spanning tree across geo_graph's nodes.
  distances = spatial.distance.squareform(spatial.distance.pdist(pos_array))
  i_, j_ = np.meshgrid(range(num_nodes), range(num_nodes), indexing="ij")
  weighted_edges = list(zip(i_.ravel(), j_.ravel(), distances.ravel()))
  mst_graph = nx.Graph()
  mst_graph.add_weighted_edges_from(weighted_edges, weight=weight_name)
  mst_graph = nx.minimum_spanning_tree(mst_graph, weight=weight_name)
  # Put geo_graph's node attributes into the mst_graph.
  for i in mst_graph.nodes():
    mst_graph.nodes[i].update(geo_graph.nodes[i])

  # Compose the graphs.
  combined_graph = nx.compose_all((mst_graph, geo_graph.copy()))
  # Put all distance weights into edge attributes.
  for i, j in combined_graph.edges():
    combined_graph.get_edge_data(i, j).setdefault(weight_name, distances[i, j])
  return combined_graph
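# Usage sketch (illustration, not from the original source): generate one
# connected graph with between 10 and 15 nodes; the MST step guarantees
# connectivity even with the tree-heavy default theta.
graph = GenerateGraph(np.random.RandomState(0), num_nodes_min_max=(10, 16))
assert nx.is_connected(graph)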
def sample_index(self, rng: np.random.RandomState) -> int:
    """Sample a random index within the table, taking into account the size of
    the noise vector.

    :param rng: Maze random number generator to be used.
    :return: A noise index to be passed to
        :meth:`maze.train.trainers.es.es_shared_noise_table.SharedNoiseTable.get`.
    """
    return rng.randint(0, len(self.noise))
def mutate_add_node(
        genome: Genome, rnd: np.random.RandomState,
        generator: InnovationNumberGeneratorInterface,
        config: NeatConfig) -> Tuple[Genome, Node, Connection, Connection]:
    """
    With a probability given in the config, add a new node to the genome.
    A random connection is selected and disabled. A new node is placed between
    the in and out node of that connection, and two new connections are
    created: one leading into the new node (weight=1) and one leading out
    (weight = weight of the disabled connection).
    :param genome: the genome that should be modified
    :param rnd: a random generator to determine if, and how, the genome is mutated
    :param generator: a generator for innovation numbers for nodes and connections
    :param config: a config that specifies the mutation params
    :return: the modified genome, as well as the generated node and the two
        connections (or None values, if no mutation occurred)
    """
    # Check if node should mutate
    if rnd.uniform(0, 1) > config.probability_mutate_add_node:
        return genome, None, None, None

    selected_connection = genome.connections[rnd.randint(0, len(genome.connections))]
    selected_connection.enabled = False

    in_node = next(x for x in genome.nodes
                   if x.innovation_number == selected_connection.input_node)
    out_node = next(x for x in genome.nodes
                    if x.innovation_number == selected_connection.output_node)

    # Select the activation function from one of the two nodes at random
    new_node_activation = (in_node.activation_function if rnd.uniform(0, 1) <= 0.5
                           else out_node.activation_function)
    new_node_x_position = (in_node.x_position + out_node.x_position) / 2

    new_node = Node(
        generator.get_node_innovation_number(in_node, out_node),
        NodeType.HIDDEN,
        rnd.uniform(low=config.bias_initial_min, high=config.bias_initial_max),
        new_node_activation, new_node_x_position)

    new_connection_in = Connection(
        generator.get_connection_innovation_number(in_node, new_node),
        in_node.innovation_number, new_node.innovation_number,
        weight=1, enabled=True)
    new_connection_out = Connection(
        generator.get_connection_innovation_number(new_node, out_node),
        new_node.innovation_number, out_node.innovation_number,
        weight=selected_connection.weight, enabled=True)

    genome.nodes.append(new_node)
    genome.connections.append(new_connection_in)
    genome.connections.append(new_connection_out)

    return genome, new_node, new_connection_in, new_connection_out
def get_offset(length: int, random_state: np.random.RandomState):
    """Choose a random offset (useful for permutations, etc.).

    Examples:
        >>> get_offset(10, np.random.RandomState(42))
        5
    """
    min_offset = min(10, length // 2)
    offset = random_state.randint(min_offset, length - min_offset + 1)
    return offset
def make_trials(system: VisionSystem, image_collection: ImageCollection,
                repeats: int, random: np.random.RandomState):
    # Get the true motions, for making trials
    true_motions = [
        image_collection.images[frame_idx - 1].camera_pose.find_relative(
            image_collection.images[frame_idx].camera_pose)
        if frame_idx > 0 else None
        for frame_idx in range(len(image_collection))
    ]

    # Make some plausible trial results
    trial_results = []
    for repeat in range(repeats):
        start_idx = random.randint(0, len(image_collection) - 2)
        frame_results = [
            FrameResult(
                timestamp=timestamp,
                image=image,
                pose=image.camera_pose,
                processing_time=random.uniform(0.001, 1.0),
                estimated_motion=true_motions[frame_idx].find_independent(
                    Transform(
                        location=random.normal(0, 1, 3),
                        rotation=t3.quaternions.axangle2quat(
                            random.uniform(-1, 1, 3),
                            random.normal(0, np.pi / 2)),
                        w_first=True)) if frame_idx > start_idx else None,
                tracking_state=TrackingState.OK
                if frame_idx > start_idx else TrackingState.NOT_INITIALIZED,
                num_matches=random.randint(10, 100))
            for frame_idx, (timestamp, image) in enumerate(image_collection)
        ]
        frame_results[start_idx].estimated_pose = Transform()
        trial_settings = {'random': random.randint(0, 10), 'repeat': repeat}
        trial_result = SLAMTrialResult(system=system,
                                       image_source=image_collection,
                                       success=True,
                                       results=frame_results,
                                       has_scale=False,
                                       settings=trial_settings)
        trial_result.save()
        trial_results.append(trial_result)
    return trial_results
def _apply_array(self, arrays: tp.Sequence[np.ndarray], rng: np.random.RandomState) -> np.ndarray:
    arrays = list(arrays)
    assert len(arrays) == 1
    data = arrays[0]
    if rng is None:
        rng = np.random.RandomState()
    # np.ndarray exposes ndim (dim is the torch spelling)
    axis = tuple(range(data.ndim)) if self.axis is None else self.axis
    shifts = [rng.randint(data.shape[a]) for a in axis]
    return np.roll(data, shifts, axis=axis)  # type: ignore
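# Illustration (not from the original source): the shift mutation above amounts
# to rolling each requested axis by an independent random offset.
rng = np.random.RandomState(0)
data = np.arange(6).reshape(2, 3)
shifts = [rng.randint(data.shape[a]) for a in (0, 1)]
rolled = np.roll(data, shifts, axis=(0, 1))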
def cluster_into_random_sized_groups(orig_list: List[int], min_group_size: int,
                                     max_group_size: int,
                                     numpy_rng: np.random.RandomState) -> List[List[int]]:
    final_list = []
    cnt = 0
    while cnt < len(orig_list):
        size = numpy_rng.randint(min_group_size, max_group_size + 1)
        final_list.append(orig_list[cnt:cnt + size])
        cnt += size
    return final_list
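# Usage sketch (illustration, not from the original source): partition ten
# elements into chunks of 2-3 items; the final chunk may be shorter because
# slicing clamps at the end of the list.
chunks = cluster_into_random_sized_groups(list(range(10)), 2, 3,
                                          np.random.RandomState(0))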
def sample_subtasks(rng: np.random.RandomState,
                    pool: List[str],
                    minimum_size: int,
                    maximum_size: Optional[int] = None,
                    replace: bool = False) -> List[str]:
    if maximum_size is not None:
        assert maximum_size <= len(pool), 'Invalid maximum_size.'
    maximum_size = maximum_size or len(pool)
    random_size = rng.randint(minimum_size, maximum_size + 1)
    sampled_subtasks = rng.choice(pool, size=random_size, replace=replace)
    return list(sampled_subtasks)
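# Usage sketch (illustration, not from the original source): draw between one
# and three distinct subtasks from a hypothetical pool of four names.
subtasks = sample_subtasks(np.random.RandomState(0),
                           ['pick', 'place', 'push', 'pull'],
                           minimum_size=1, maximum_size=3)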
def random_crop_and_scale_to_fit_target(
        src_img: np.ndarray, target_width: int, target_height: int,
        rng: np.random.RandomState = np.random.RandomState()):
    # case 1: no cropping because size fits
    if src_img.shape[0] == target_height and src_img.shape[1] == target_width:
        return src_img

    crop_startY = 0
    crop_startX = 0
    img_height = src_img.shape[0]
    img_width = src_img.shape[1]

    # cropped width  cw = s * target_width,  s scalar unknown
    # cropped height ch = s * target_height, same s to maintain aspect ratio
    # maximize s so that cw and ch are within the source image limits:
    #   0 <= cw <= src_img_w
    #   0 <= ch <= src_img_h
    #   0 <= s * target_width  <= src_img_w
    #   0 <= s * target_height <= src_img_h
    #   s = min(src_img_w / target_width, src_img_h / target_height)
    s = np.min([
        float(img_width) / float(target_width),
        float(img_height) / float(target_height)
    ])
    cw = np.min([int(s * target_width), img_width])
    ch = np.min([int(s * target_height), img_height])

    crop_startX = rng.randint(0, img_width - cw + 1)
    crop_startY = rng.randint(0, img_height - ch + 1)
    cropped_img = src_img[crop_startY:crop_startY + ch,
                          crop_startX:crop_startX + cw]
    resized_img = cv2.resize(cropped_img, (target_width, target_height),
                             interpolation=cv2.INTER_LINEAR)
    return resized_img
def _make_slices(shape: tp.Tuple[int, ...], axes: tp.Tuple[int, ...], size: int,
                 rng: np.random.RandomState) -> tp.List[slice]:
    slices = []
    for a, s in enumerate(shape):
        if a in axes:
            if s <= 1:
                raise ValueError("Cannot crossover on axis with size 1")
            start = rng.randint(s - size)
            slices.append(slice(start, start + size))
        else:
            slices.append(slice(s))
    return slices
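# Usage sketch (illustration, not from the original source): a random window of
# width 2 on axis 1 of a 4x5 array; axis 0 keeps its full extent.
window = _make_slices((4, 5), axes=(1,), size=2, rng=np.random.RandomState(0))
# -> [slice(None, 4, None), slice(start, start + 2)] for some start in {0, 1, 2}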
def _sample(self, rs: np.random.RandomState, size: int = None) -> Union[int, np.ndarray]:
    return rs.randint(0, self._num_choices, size=size)