def check_array(self, array, shape, dtype, copy=True):
    """
    Check that a given array is compatible with the FFTW plans,
    in terms of alignment and data type. If the provided array
    fails the alignment check, a new array is returned.
    """
    if array.shape != shape:
        raise ValueError("Invalid data shape: expected %s, got %s" %
                         (shape, array.shape))
    if array.dtype != dtype:
        raise ValueError("Invalid data type: expected %s, got %s" %
                         (dtype, array.dtype))
    if self.check_alignment and not pyfftw.is_byte_aligned(array):
        # Re-allocate into a byte-aligned buffer using the checked shape and
        # dtype (the original used self.shape/self.dtype_in here, which
        # ignored the validated arguments).
        array2 = pyfftw.zeros_aligned(shape, dtype=dtype)
        np.copyto(array2, array)
    else:
        array2 = np.copy(array) if copy else array
    return array2
def copyto(dst, src):
    """Copies the elements of an ndarray to those of another one.

    This function can copy the CPU/GPU arrays to the destination arrays on
    another device.

    Args:
        dst (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Destination array.
        src (`numpy.ndarray`, `cupy.ndarray` or `ideep4py.mdarray`):
            Source array.

    """
    if isinstance(dst, numpy.ndarray):
        numpy.copyto(dst, _cpu._to_cpu(src))
    elif isinstance(dst, intel64.mdarray):
        intel64.ideep.basic_copyto(dst, _cpu._to_cpu(src))
    elif isinstance(dst, cuda.ndarray):
        if isinstance(src, chainer.get_cpu_array_types()):
            src = numpy.asarray(src)
            if dst.flags.c_contiguous or dst.flags.f_contiguous:
                dst.set(src)
            else:
                cuda.cupy.copyto(dst, cuda.to_gpu(src, device=dst.device))
        elif isinstance(src, cuda.ndarray):
            cuda.cupy.copyto(dst, src)
        else:
            raise TypeError('cannot copy from non-array object of type {}'
                            .format(type(src)))
    else:
        raise TypeError('cannot copy to non-array object of type {}'.format(
            type(dst)))
def laplacian(grid, out):
    np.copyto(out, grid)
    out *= -4
    out += np.roll(grid, +1, 0)
    out += np.roll(grid, -1, 0)
    out += np.roll(grid, +1, 1)
    out += np.roll(grid, -1, 1)
def guess_data_type(orig_values, namask=None):
    """
    Use heuristics to guess data type.
    """
    valuemap, values = None, orig_values
    is_discrete = is_discrete_values(orig_values)
    if is_discrete:
        valuemap = sorted(is_discrete)
        coltype = DiscreteVariable
    else:
        # try to parse as float
        orig_values = np.asarray(orig_values)
        if namask is None:
            namask = isnastr(orig_values)
        values = np.empty_like(orig_values, dtype=float)
        values[namask] = np.nan
        try:
            np.copyto(values, orig_values, where=~namask, casting="unsafe")
        except ValueError:
            tvar = TimeVariable('_')
            try:
                values[~namask] = [tvar.parse(i) for i in orig_values[~namask]]
            except ValueError:
                coltype = StringVariable
                # return original_values
                values = orig_values
            else:
                coltype = TimeVariable
        else:
            coltype = ContinuousVariable
    return valuemap, values, coltype
def _copyto(a, val, mask):
    """
    Replace values in `a` with `val` where `mask` is True.

    This differs from copyto in that it will deal with the case where `a`
    is a numpy scalar.

    Parameters
    ----------
    a : ndarray or numpy scalar
        Array or numpy scalar some of whose values are to be replaced
        by val.
    val : numpy scalar
        Value used as a replacement.
    mask : ndarray, scalar
        Boolean array. Where True the corresponding element of `a` is
        replaced by `val`. Broadcasts.

    Returns
    -------
    res : ndarray, scalar
        Array with elements replaced or scalar `val`.
    """
    if isinstance(a, np.ndarray):
        np.copyto(a, val, where=mask, casting='unsafe')
    else:
        a = a.dtype.type(val)
    return a
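# Usage sketch for _copyto (illustrative, not part of the source): with an
# ndarray the replacement happens in place; with a numpy scalar a new scalar
# of the same dtype is returned and the mask is effectively ignored.
a = np.array([1.0, 2.0, 3.0])
a = _copyto(a, np.nan, np.array([False, True, False]))  # -> [1., nan, 3.]
s = _copyto(np.float64(2.0), np.nan, True)              # -> nan (np.float64)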
def fdtd(input_grid, steps):
    grid = input_grid.copy()
    old_grid = np.zeros_like(input_grid)
    previous_grid = np.zeros_like(input_grid)

    l_x = grid.shape[0]
    l_y = grid.shape[1]

    for i in range(steps):
        np.copyto(previous_grid, old_grid)
        np.copyto(old_grid, grid)

        for x in range(l_x):
            for y in range(l_y):
                grid[x, y] = 0.0
                if 0 < x + 1 < l_x:
                    grid[x, y] += old_grid[x + 1, y]
                if 0 < x - 1 < l_x:
                    grid[x, y] += old_grid[x - 1, y]
                if 0 < y + 1 < l_y:
                    grid[x, y] += old_grid[x, y + 1]
                if 0 < y - 1 < l_y:
                    grid[x, y] += old_grid[x, y - 1]

                grid[x, y] /= 2.0
                grid[x, y] -= previous_grid[x, y]

    return grid
def testTensorAccessor(self):
    """Check that tensor returns a reference."""
    array_ref = self.interpreter.tensor(self.input0)
    np.copyto(array_ref(), self.initial_data)
    self.assertAllEqual(array_ref(), self.initial_data)
    self.assertAllEqual(
        self.interpreter.get_tensor(self.input0), self.initial_data)
def acartesian2(*arrays):
    """Array cartesian product in 2d

    Produces a new ndarray that has the cartesian product of every row in the
    input arrays. The number of columns is the sum of the number of columns
    in each input. The number of rows is the product of the number of rows
    in each input.

    Arguments
    ---------
    *arrays : [ndarray (xi, s)]
    """
    rows = prod(a.shape[0] for a in arrays)
    columns = sum(a.shape[1] for a in arrays)
    dtype = arrays[0].dtype  # should always have at least one role
    assert all(a.dtype == dtype for a in arrays), \
        "all arrays must have the same dtype"

    result = np.zeros((rows, columns), dtype)
    pre_row = 1
    post_row = rows
    pre_column = 0
    for array in arrays:
        length, width = array.shape
        post_row //= length
        post_column = pre_column + width
        view = result[:, pre_column:post_column]
        view.shape = (pre_row, -1, post_row, width)
        np.copyto(view, array[:, None])
        pre_row *= length
        pre_column = post_column
    return result
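# Usage sketch for acartesian2 (illustrative; assumes `prod` is something
# like math.prod or an equivalent local helper):
a = np.array([[1], [2]])
b = np.array([[3], [4], [5]])
acartesian2(a, b)
# -> [[1, 3], [1, 4], [1, 5],
#     [2, 3], [2, 4], [2, 5]]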
def __call__(self, key, value):
    key = self.path + key.lstrip('/')
    if not self.strict and key not in self.npz:
        return value

    if isinstance(self.ignore_names, (tuple, list)):
        ignore_names = self.ignore_names
    else:
        ignore_names = (self.ignore_names,)
    for ignore_name in ignore_names:
        if isinstance(ignore_name, str):
            if key == ignore_name:
                return value
        elif callable(ignore_name):
            if ignore_name(key):
                return value
        else:
            raise ValueError(
                'ignore_names needs to be a callable, string or '
                'list of them.')

    dataset = self.npz[key]
    if dataset[()] is None:
        return None

    if value is None:
        return dataset
    elif isinstance(value, numpy.ndarray):
        numpy.copyto(value, dataset)
    elif isinstance(value, cuda.ndarray):
        value.set(numpy.asarray(dataset))
    else:
        value = type(value)(numpy.asarray(dataset))
    return value
def extract_features(annotation, image_size=(64, 64)):
    n = len(annotation)
    for i, a in enumerate(annotation):
        print_progress_bar(i, n)
        image_path = a["image"]
        label = a["label"]
        image = cv2.imread(image_path)
        if image is None:
            continue
        image = cv2.resize(image, image_size, image)
        image_channels = cv2.split(image)
        for channel_idx, channel in enumerate(image_channels):
            np.copyto(net.blobs["data"].data[0, channel_idx, :, :], channel)
        # image = np.dstack(cv2.split(image))
        # np.copyto(net.blobs["data"].data, image)
        # net.blobs["data"].data = image
        output_blobs = net.forward(end="conv1", blobs=["conv1", ])
        channels_num = output_blobs["conv1"].shape[1]
        channels = [output_blobs["conv1"][0, i, :, :]
                    for i in range(channels_num)]
        features = cv2.merge(channels)
        output_dir = join(args.features_dir,
                          "positives" if label else "negatives")
        if not isdir(output_dir):
            mkdir(output_dir)
        feature_map_path = join(output_dir,
                                splitext(basename(image_path))[0] + ".pkl")
        # open in binary mode for pickle (the original used the
        # Python 2 built-in file(..., "w"))
        with open(feature_map_path, "wb") as f:
            pkl.dump(features, f)
    stop_progress_bar()
def npArrayToReadOnlySharedArray(npArray):
    '''Returns a shared memory array for a numpy array.  Used to reduce
    memory footprint when passing parameters to multiprocess pools'''
    SharedBase = multiprocessing.sharedctypes.RawArray(
        ctypes.c_float, npArray.shape[0] * npArray.shape[1])
    SharedArray = np.ctypeslib.as_array(SharedBase)
    SharedArray = SharedArray.reshape(npArray.shape)
    np.copyto(SharedArray, npArray)
    return SharedArray
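# Usage sketch (illustrative; assumes numpy, ctypes and multiprocessing are
# imported as in the enclosing module): the shared buffer can be handed to
# pool workers without serializing a private copy of the data. Note the
# function expects a 2-D array and copies into c_float storage.
data = np.random.rand(4, 5).astype(np.float32)
shared = npArrayToReadOnlySharedArray(data)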
def embed(cover, secret, pos, skip):
    file = open("in.txt", "w")
    multiple = False
    coverMatrix = pgm_to_mat(cover)
    secretMatrix = pgm_to_mat(secret)
    stegoMatrix = np.zeros(np.shape(coverMatrix), dtype=np.complex_)
    np.copyto(stegoMatrix, coverMatrix)
    dummy = ""
    if skip < 1:
        skip = 1
        multiple = True
    for a in range(0, len(secretMatrix)):
        for b in range(0, len(secretMatrix)):
            dummy += np.binary_repr(secretMatrix[a][b], width=8)
            # file.write(np.binary_repr(secretMatrix[a][b], width=8) + "\n")
    index = 0
    for a in range(0, len(stegoMatrix) * len(stegoMatrix), skip):
        rown = int(a % len(stegoMatrix))
        coln = int(a / len(stegoMatrix))
        if index >= len(dummy):
            break
        stegoMatrix[coln][rown] = (
            (int(coverMatrix[coln][rown]) & ~(1 << hash(coln, rown, pos)))
            | (int(dummy[index], 2) << hash(coln, rown, pos)))
        index += 1
        if multiple:
            stegoMatrix[coln][rown] = (
                (int(stegoMatrix[coln][rown])
                 & ~(1 << (3 - hash(coln, rown, pos))))
                | (int(dummy[index], 2) << (3 - hash(coln, rown, pos))))
            index += 1
        file.write(np.binary_repr(int(stegoMatrix[coln][rown]), 8) + "\n")
    return stegoMatrix
def binary_to_net(weights, spm_stream, ind_stream, codebook, num_nz):
    bits = np.log2(codebook.size)
    if bits == 4:
        slots = 2
    elif bits == 8:
        slots = 1
    else:
        print("Not implemented,", bits)
        sys.exit()
    code = np.zeros(weights.size, np.uint8)

    # Recover from binary stream
    spm = np.zeros(num_nz, np.uint8)
    ind = np.zeros(num_nz, np.uint8)
    if slots == 2:
        # low and high nibbles hold consecutive codebook entries;
        # integer division (//) preserves the original Python 2 behavior
        spm[np.arange(0, num_nz, 2)] = spm_stream % (2 ** 4)
        spm[np.arange(1, num_nz, 2)] = spm_stream // (2 ** 4)
    else:
        spm = spm_stream
    ind[np.arange(0, num_nz, 2)] = ind_stream % (2 ** 4)
    ind[np.arange(1, num_nz, 2)] = ind_stream // (2 ** 4)

    # Recover the matrix
    ind = np.cumsum(ind + 1) - 1
    code[ind] = spm
    data = np.reshape(codebook[code], weights.shape)
    np.copyto(weights, data)
def laplacian(grid, out):
    copyto(out, grid)
    multiply(out, -4.0, out)
    roll_add(grid, +1, 0, out)
    roll_add(grid, -1, 0, out)
    roll_add(grid, +1, 1, out)
    roll_add(grid, -1, 1, out)
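# This variant assumes `copyto` and `multiply` are numpy functions imported
# into the namespace (e.g. `from numpy import copyto, multiply`) plus a
# custom `roll_add` helper that is not shown. A minimal sketch of such a
# helper, under that assumption:
import numpy as np

def roll_add(grid, shift, axis, out):
    # np.roll allocates the shifted copy; the accumulation is in place.
    out += np.roll(grid, shift, axis)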
def learn_parameters(adj_list, q, c_max, nb_iterations, crit_infer=0.2,
                     crit_learn=0.2, tmax_infer=12, tmax_learn=8):
    """ Learns the true group parameters of a given graph, and returns the
    optimal group assignment given by the belief propagation algorithm """
    # Number of nodes on the graph
    N = len(adj_list)
    # Optimal values given by the algorithm
    f_min = 0
    groups_opt = np.zeros(N, dtype=np.int8)
    for _ in range(nb_iterations):
        # Random initialization of each group's size
        n = np.random.rand(q)
        n /= np.sum(n)
        # Random initialization of the edge matrix
        c = np.random.rand(q, q)
        for i in range(q - 1):
            for j in range(i + 1, q):
                c[j, i] = c[i, j]
        c *= c_max
        # Application of the BP_learning algorithm for these initialized values
        groups, f_BP = BP_learning(q, n, c, adj_list, crit_infer, crit_learn,
                                   tmax_infer, tmax_learn)
        # Updates the optimal values found
        if f_BP < f_min:
            f_min = f_BP
            np.copyto(groups_opt, groups)
    # Returns the optimal group assignment found
    return groups_opt
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
    """Draws mask on an image.

    Args:
        image: uint8 numpy array with shape (img_height, img_height, 3)
        mask: a uint8 numpy array of shape (img_height, img_height) with
            values between either 0 or 1.
        color: color to draw the keypoints with. Default is red.
        alpha: transparency value between 0 and 1. (default: 0.4)

    Raises:
        ValueError: On incorrect data type for image or masks.
    """
    if image.dtype != np.uint8:
        raise ValueError('`image` not of type np.uint8')
    if mask.dtype != np.uint8:
        raise ValueError('`mask` not of type np.uint8')
    if np.any(np.logical_and(mask != 1, mask != 0)):
        raise ValueError('`mask` elements should be in [0, 1]')
    if image.shape[:2] != mask.shape:
        raise ValueError('The image has spatial dimensions %s but the mask '
                         'has dimensions %s' % (image.shape[:2], mask.shape))
    rgb = ImageColor.getrgb(color)
    pil_image = Image.fromarray(image)

    solid_color = np.expand_dims(
        np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
    pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
    pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
    pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
    np.copyto(image, np.array(pil_image.convert('RGB')))
def draw_bounding_box_on_image_array(image,
                                     ymin,
                                     xmin,
                                     ymax,
                                     xmax,
                                     color='red',
                                     thickness=4,
                                     display_str_list=(),
                                     use_normalized_coordinates=True):
    """Adds a bounding box to an image (numpy array).

    Bounding box coordinates can be specified in either absolute (pixel) or
    normalized coordinates by setting the use_normalized_coordinates
    argument.

    Args:
        image: a numpy array with shape [height, width, 3].
        ymin: ymin of bounding box.
        xmin: xmin of bounding box.
        ymax: ymax of bounding box.
        xmax: xmax of bounding box.
        color: color to draw bounding box. Default is red.
        thickness: line thickness. Default value is 4.
        display_str_list: list of strings to display in box
            (each to be shown on its own line).
        use_normalized_coordinates: If True (default), treat coordinates
            ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
            coordinates as absolute.
    """
    image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
                               thickness, display_str_list,
                               use_normalized_coordinates)
    np.copyto(image, np.array(image_pil))
def _applyTo(self, image):
    noise_array = np.empty(np.prod(image.array.shape), dtype=float)
    noise_array.reshape(image.array.shape)[:, :] = image.array

    # cf. PoissonNoise._applyTo function
    frac_sky = self.sky_level - image.dtype(self.sky_level)  # 0 if dtype = float
    int_sky = self.sky_level - frac_sky

    if self.sky_level != 0.:
        noise_array += self.sky_level

    # First add the poisson noise from the signal + sky:
    if self.gain > 0.:
        noise_array *= self.gain  # convert to electrons
        noise_array = noise_array.clip(0.)
        # The noise_image now has the expectation values for each pixel with
        # the sky added.
        self._pd.generate_from_expectation(noise_array)
        # Subtract off the sky, since we don't want it in the final image.
        noise_array /= self.gain

    # Now add the read noise:
    if self.read_noise > 0.:
        self._gd.clearCache()
        self._gd.add_generate(noise_array)

    if frac_sky != 0.:
        noise_array -= frac_sky
    np.copyto(image.array, noise_array.reshape(image.array.shape),
              casting='unsafe')
    if int_sky != 0.:
        image -= int_sky
def draw_bounding_boxes_on_image_array(image,
                                       boxes,
                                       color='red',
                                       thickness=4,
                                       display_str_list_list=()):
    """Draws bounding boxes on image (numpy array).

    Args:
        image: a numpy array object.
        boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax,
            xmax). The coordinates are in normalized format between [0, 1].
        color: color to draw bounding box. Default is red.
        thickness: line thickness. Default value is 4.
        display_str_list_list: list of list of strings.
            a list of strings for each bounding box.
            The reason to pass a list of strings for a bounding box is that
            it might contain multiple labels.

    Raises:
        ValueError: if boxes is not a [N, 4] array
    """
    image_pil = Image.fromarray(image)
    draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
                                 display_str_list_list)
    np.copyto(image, np.array(image_pil))
def conditional_logsumexp(a, where, axis):
    # `a` added to the signature: the original referenced an undefined name
    # `a`. Assumes scipy.special.logsumexp (or an equivalent) is in scope.
    masked = -np.ones(a.shape) * np.inf
    np.copyto(masked, a, where=where)
    masked_sum = logsumexp(masked, axis=axis)
    # np.copyto(masked_sum, -np.ones(masked_sum.shape) * np.inf,
    #           where=np.isnan(masked_sum))
    np.place(masked_sum, np.isnan(masked_sum), -np.inf)
    return masked_sum
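# Usage sketch (illustrative): logsumexp restricted to selected entries,
# with fully-masked columns mapped to -inf rather than NaN.
from scipy.special import logsumexp  # assumed import

a = np.log(np.array([[1.0, 2.0], [3.0, 4.0]]))
m = np.array([[True, False], [True, False]])
conditional_logsumexp(a, m, axis=0)  # -> [log(1 + 3), -inf]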
def _applyTo(self, image):
    noise_array = np.empty(np.prod(image.array.shape), dtype=float)
    noise_array.reshape(image.array.shape)[:, :] = image.array

    # Minor subtlety for integer images.  It's a bit more consistent to
    # convert to an integer with the sky still added and then subtract off
    # the sky.  But this isn't quite right if the sky has a fractional part.
    # So only subtract off the integer part of the sky at the end.
    # For float images, you get the same answer either way, so it doesn't
    # matter.
    frac_sky = self.sky_level - image.dtype(self.sky_level)
    int_sky = self.sky_level - frac_sky

    if self.sky_level != 0.:
        noise_array += self.sky_level
    # Make sure no negative values
    noise_array = noise_array.clip(0.)
    # The noise_image now has the expectation values for each pixel with the
    # sky added.
    self._pd.generate_from_expectation(noise_array)
    # Subtract off the sky, since we don't want it in the final image.
    if frac_sky != 0.:
        noise_array -= frac_sky
    # Noise array is now the correct value for each pixel.
    np.copyto(image.array, noise_array.reshape(image.array.shape),
              casting='unsafe')
    if int_sky != 0.:
        image -= int_sky
def test_array_maskna_to_nomask():
    # Assignment from an array with NAs to a non-masked array,
    # excluding the NAs with a mask
    a = np.array([[2, np.NA, 5], [1, 6, np.NA]], maskna=True)
    mask = np.array([[1, 0, 0], [1, 1, 0]], dtype='?')
    badmask = np.array([[1, 0, 0], [0, 1, 1]], dtype='?')
    expected = np.array([[2, 1, 2], [1, 6, 5]])

    # With masked indexing
    b = np.arange(6).reshape(2, 3)
    b[mask] = a[mask]
    assert_array_equal(b, expected)

    # With copyto
    b = np.arange(6).reshape(2, 3)
    np.copyto(b, a, where=mask)
    assert_array_equal(b, expected)

    # With masked indexing
    b = np.arange(6).reshape(2, 3)

    def asn():
        b[badmask] = a[badmask]
    assert_raises(ValueError, asn)

    # With copyto
    b = np.arange(6).reshape(2, 3)
    assert_raises(ValueError, np.copyto, b, a, where=badmask)
def apply_qt_elements_filtering(image):
    # # apply bilateral filter on IMAGE to smooth colors while keeping edges
    cv2.bilateralFilter(UtilityOperations.crop_to_720p(image), 3, 255, 50,
                        dst=MultiprocessOperations.shared_memory_image)
    # # crop elements from IMAGE
    MultiprocessOperations.wait_for_element_cropping()
    # # upload RED channels to GPU
    TheanoOperations.__shared_red.set_value(
        MultiprocessOperations.shared_memory_red_channel, borrow=True)
    # # upload GREEN channels to GPU
    TheanoOperations.__shared_green.set_value(
        MultiprocessOperations.shared_memory_green_channel, borrow=True)
    # # upload BLUE channels to GPU
    TheanoOperations.__shared_blue.set_value(
        MultiprocessOperations.shared_memory_blue_channel, borrow=True)
    # # download FILTERING result from GPU
    np.copyto(MultiprocessOperations.shared_memory_qt_filtered_elements,
              TheanoOperations.__apply_binary_filtering)
    # # apply IMAGE threshold CLASSIFICATION
    MultiprocessOperations.wait_for_qt_image_classification()
    return MultiprocessOperations.shared_memory_qt_filtered_elements
def _load_flat_grad(self, flat_grad):
    start = 0
    for g in self._grad_buffers:
        size = g.size
        np.copyto(g, np.reshape(flat_grad[start:start + size], g.shape))
        start += size
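# Hedged sketch of the inverse operation (hypothetical helper, mirroring
# _load_flat_grad above): flatten the per-parameter gradient buffers into a
# single vector.
def _get_flat_grad(self):
    return np.concatenate([g.ravel() for g in self._grad_buffers])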
def serialize(self, serializer):
    """Serializes the link object.

    Args:
        serializer (~chainer.AbstractSerializer): Serializer object.

    """
    d = self.__dict__
    for name in self._params:
        serializer(name, d[name].data)
    for name in self._persistent:
        d[name] = serializer(name, d[name])
    if (self.has_uninitialized_params and
            isinstance(serializer, chainer.serializer.Serializer)):
        raise ValueError("uninitialized parameters cannot be serialized")
    for name in self._uninitialized_params.copy():
        # Note: There should only be uninitialized parameters
        # during deserialization.
        initialized_value = serializer(name, None)
        self.add_param(name, initialized_value.shape)
        uninitialized_value = d[name].data
        if isinstance(uninitialized_value, numpy.ndarray):
            numpy.copyto(uninitialized_value, initialized_value)
        elif isinstance(uninitialized_value, cuda.ndarray):
            uninitialized_value.set(numpy.asarray(initialized_value))
def lieberfit(Data, Order=5):
    import numpy as np
    NewCurve = np.zeros(shape=(Data.shape[0]))
    OldCurve = np.array(Data)
    Diff = NewCurve - OldCurve
    Convergence = np.dot(Diff, Diff)
    m = 0
    while Convergence > 1:  # Suggest setting convergence criteria == pixel resolution
        P = np.polyfit(range(len(Data)), OldCurve, Order)
        NewCurve = np.polyval(P, range(len(Data)))
        np.copyto(OldCurve, NewCurve, where=NewCurve < OldCurve)
        m += 1
        Diff = NewCurve - OldCurve
        Convergence = np.dot(Diff, Diff)
    # print('Iterations needed for convergence: ', m, sep='')
    CurveFit = np.copy(NewCurve)
    return CurveFit
def use(self, dataset):
    """
    Computes and returns the outputs of the Learner for ``dataset``:
    - the outputs should be a Numpy 2D array of size
      len(dataset) by (nb of classes + 1)
    - the ith row of the array contains the outputs for the ith example
    - the outputs for each example should contain the predicted class
      (first element) and the output probabilities for each class
      (following elements)

    Argument ``dataset`` is an MLProblem object.
    """
    outputs = np.zeros((len(dataset), self.n_classes + 1))
    errors = np.zeros((len(dataset), 2))

    ## PUT CODE HERE ##
    # row[0] is the input image (array), row[1] the actual target class
    # for that image
    for ind, row in enumerate(dataset):
        # fill 2nd element with loss
        errors[ind, 1] = self.fprop(row[0], row[1])
        # predicted class
        outputs[ind, 0] = np.argmax(self.hs[-1])
        # 0/1 classification error
        errors[ind, 0] = (outputs[ind, 0] != row[1])
        # print "errors: ", errors[ind, ]
        # add output probs
        np.copyto(outputs[ind, 1:], self.hs[-1])
        # print "outputs: ", outputs[ind, ]
        # time.sleep(5)
    return outputs, errors
def set_rho(self, rho):
    """
    Set the initial density matrix
    :param rho: 2D numpy array or string containing the density matrix
    :return: self
    """
    if isinstance(rho, str):
        # density matrix is supplied as a string
        ne.evaluate("%s + 0j" % rho, local_dict=vars(self), out=self.rho)

    elif isinstance(rho, np.ndarray):
        # density matrix is supplied as an array

        # perform the consistency checks
        assert rho.shape == self.rho.shape, \
            "The grid size does not match with the density matrix"

        # make sure the density matrix is stored as a complex array
        # (np.complex is a deprecated alias of the builtin complex)
        np.copyto(self.rho, rho.astype(complex))

    else:
        raise ValueError("density matrix must be either string or numpy.array")

    # normalize
    self.rho /= self.rho.trace() * self.dX

    return self
def revert_all(self, clear=False):
    '''return the image to its original state'''
    np.copyto(self.image, self.ref_image)
    if clear:
        self.clear_history()
        self.drawing.reset()
def __init__(self, W=20, H=20):
    """ Initialize variables """
    self.WIDTH = W
    self.HEIGHT = H
    self.prevState = numpy.zeros((self.HEIGHT, self.WIDTH))
    self.nextState = numpy.zeros((self.HEIGHT, self.WIDTH))
    numpy.copyto(self.prevState, self.nextState)
def get_shape_composition(self, LABEL_MAP, query_shapes, query_scores,
                          exemplar_shapes):
    image_outputs = {}
    for i in range(0, self.TOP_K):
        image_outputs[i] = {}
        image_outputs[i]['shape_im'] = np.zeros(
            (np.size(LABEL_MAP, 0), np.size(LABEL_MAP, 1), 3))
        image_outputs[i]['part_im'] = np.zeros(
            (np.size(LABEL_MAP, 0), np.size(LABEL_MAP, 1), 3))
        image_outputs[i]['comp_mask'] = np.zeros(
            (np.size(LABEL_MAP, 0), np.size(LABEL_MAP, 1)))
        image_outputs[i]['mask'] = np.zeros(
            (np.size(LABEL_MAP, 0), np.size(LABEL_MAP, 1)))

    for i in range(0, len(query_shapes)):
        ith_score = query_scores[i]['score']
        if ith_score.size == 0:
            continue

        # get the box info--
        ith_bbx = query_shapes[i]['bbox']
        ith_org_mask = query_shapes[i]['comp_shape']

        # get the parts feat --
        if self.IS_PARTS == 1:
            ith_part_feat = self.get_part_feat(
                query_shapes[i]['comp_context'])

        for l in range(0, np.size(ith_score, 0)):
            lth_nn_img = np.zeros(
                (np.size(LABEL_MAP, 0), np.size(LABEL_MAP, 1), 3))
            lth_nn_part_img = np.zeros(
                (np.size(LABEL_MAP, 0), np.size(LABEL_MAP, 1), 3))
            lth_nn = exemplar_shapes[ith_score[l, 6]]
            lth_nn_rgb = np.empty_like(lth_nn['comp_rgb'])
            np.copyto(lth_nn_rgb, lth_nn['comp_rgb'])
            lth_nn_context = np.empty_like(lth_nn['comp_context'])
            np.copyto(lth_nn_context, lth_nn['comp_context'])

            lth_nn_rgb = cv2.resize(lth_nn_rgb, dsize=(
                np.size(ith_org_mask, 1), np.size(ith_org_mask, 0)))
            lth_nn_rgb[:, :, 0] = lth_nn_rgb[:, :, 0] * ith_org_mask
            lth_nn_rgb[:, :, 1] = lth_nn_rgb[:, :, 1] * ith_org_mask
            lth_nn_rgb[:, :, 2] = lth_nn_rgb[:, :, 2] * ith_org_mask
            lth_nn_img[ith_bbx[0]:ith_bbx[2],
                       ith_bbx[1]:ith_bbx[3], :] = lth_nn_rgb

            if self.IS_PARTS == 1:
                lth_part_feat = self.get_part_feat(lth_nn_context)
                lth_part_scores = self.get_part_scores(
                    ith_part_feat, lth_part_feat)
                lth_nn_part_img[ith_bbx[0]:ith_bbx[2],
                                ith_bbx[1]:ith_bbx[3], :] = \
                    self.get_part_image(lth_nn['org_rgb'],
                                        lth_part_scores, ith_org_mask)
                image_outputs[l]['part_im'] = image_outputs[l]['part_im'] + \
                    lth_nn_part_img

            image_outputs[l]['shape_im'] = image_outputs[l]['shape_im'] + \
                lth_nn_img
            image_outputs[l]['comp_mask'] = ~((lth_nn_img[:, :, 0] == 0) & (
                lth_nn_img[:, :, 1] == 0) & (lth_nn_img[:, :, 2] == 0))
            image_outputs[l]['mask'] = image_outputs[l]['mask'] + \
                image_outputs[l]['comp_mask']
    return image_outputs
def setStateSpace(self, _A, _B, _C, _D):
    assert _A.shape == (self.n_states, self.n_states)
    assert _B.shape == (self.n_states, self.n_inputs)
    assert _C.shape == (self.n_outputs, self.n_states)
    assert _D.shape == (self.n_outputs, self.n_inputs)

    np.copyto(self.A, _A)
    np.copyto(self.B, _B)
    np.copyto(self.C, _C)
    np.copyto(self.D, _D)

    # State Transition Matrix
    np.copyto(self.state_trans, expm(self.A * self.d_time))
    np.copyto(self.B_dt, self.B * self.d_time)
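# Usage sketch with hypothetical values (not from the source), assuming an
# instance `plant` configured with n_states=2, n_inputs=1, n_outputs=1 and a
# sample time d_time: a double integrator, whose exact discrete state
# transition matrix is expm(A * d_time).
A = np.array([[0.0, 1.0],
              [0.0, 0.0]])
B = np.array([[0.0],
              [1.0]])
C = np.array([[1.0, 0.0]])
D = np.array([[0.0]])
plant.setStateSpace(A, B, C, D)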
def setInput(self, _u):
    assert _u.shape == (self.n_inputs, 1)
    np.copyto(self.u, _u)
def cholesky(in_arr, out_arr, n):
    np.copyto(out_arr, np.linalg.cholesky(in_arr))
def construct_zernike_polynomials(x, y, zernike_indexes, mask=None, weight=None):
    """Return the zernike polynomials for all objects in an image

    x - the X distance of a point from the center of its object
    y - the Y distance of a point from the center of its object
    zernike_indexes - an Nx2 array of the Zernike polynomials to be
                      computed.
    mask - a mask with same shape as X and Y of the points to consider
    weight - weightings of points with the same shape as X and Y (default
             weight on each point is 1).

    returns a height x width x N array of complex numbers which are the
    e^i portion of the sine and cosine of the Zernikes
    """
    if x.shape != y.shape:
        raise ValueError("X and Y must have the same shape")
    if mask is None:
        pass
    elif mask.shape != x.shape:
        raise ValueError("The mask must have the same shape as X and Y")
    else:
        x = x[mask]
        y = y[mask]
        if weight is not None:
            weight = weight[mask]
    lut = construct_zernike_lookuptable(
        zernike_indexes)  # precompute poly. coeffs.
    nzernikes = zernike_indexes.shape[0]
    # compute radii
    r_square = np.square(x)  # r_square = x**2
    np.add(r_square, np.square(y), out=r_square)  # r_square = x**2 + y**2
    # z = y + 1j*x
    # each Zernike polynomial is poly(r)*(r**m * np.exp(1j*m*phi)) ==
    #    poly(r)*(y + 1j*x)**m
    z = np.empty(x.shape, complex)  # np.complex is a deprecated alias
    np.copyto(z.real, y)
    np.copyto(z.imag, x)
    # preallocate buffers
    s = np.empty_like(x)
    zf = np.zeros((nzernikes,) + x.shape, complex)
    z_pows = {}
    for idx, (n, m) in zip(range(nzernikes), zernike_indexes):
        s[:] = 0
        if m not in z_pows:
            if m == 0:
                z_pows[m] = complex(1.0)
            else:
                z_pows[m] = z if m == 1 else (z ** m)
        z_pow = z_pows[m]
        # use Horner scheme; // preserves the original Python 2 integer
        # division
        for k in range((n - m) // 2 + 1):
            s *= r_square
            s += lut[idx, k]
        s[r_square > 1] = 0
        if weight is not None:
            s *= weight.astype(s.dtype)
        if m == 0:
            np.copyto(zf[idx], s)  # zf[idx] = s
        else:
            np.multiply(s, z_pow, out=zf[idx])  # zf[idx] = s*exp_term
    if mask is None:
        result = zf.transpose(tuple(range(1, 1 + x.ndim)) + (0,))
    else:
        result = np.zeros(mask.shape + (nzernikes,), complex)
        result[mask] = zf.transpose(tuple(range(1, 1 + x.ndim)) + (0,))
    return result
import numpy as np
import cv2 as cv

src = cv.imread("./image/building.JPG")
cv.namedWindow("src")
cv.imshow("src", src)
cv.moveWindow("src", 0, 0)
row, col, channel = src.shape[:]

canny = cv.Canny(src, 75, 150)
cv.imshow("canny", canny)

lines = cv.HoughLinesP(canny, 1.0, np.pi / 180, 150,
                       minLineLength=20, maxLineGap=10)

dst = np.zeros((row, col, channel), np.uint8)
np.copyto(dst, src)
for line in lines:
    x1, y1, x2, y2 = line[0]
    cv.line(dst, (x1, y1), (x2, y2), (0, 0, 255), 1, cv.LINE_AA)
cv.imshow("dst", dst)

print(lines)
cv.waitKey(0)
def get_query_shapes(self, LABEL_MAP, INST_MAP):
    instances = INST_MAP[:, :, 0]*10 + \
        INST_MAP[:, :, 1]*100 + \
        INST_MAP[:, :, 2]*1000
    category_list = np.unique(LABEL_MAP)
    category_list = category_list[category_list != self.IGNORE_LABEL]
    query_shapes = {}
    iter_ = 0
    # pdb.set_trace()
    for n in range(0, len(category_list)):
        # for each semantic label map --
        nth_label_map = np.int16(LABEL_MAP)
        nth_label_map[nth_label_map != category_list[n]] = -1
        nth_label_map[nth_label_map == category_list[n]] = 1
        nth_label_map[nth_label_map == -1] = 0
        # ---------------------------------------------------
        # check if this category belongs to things or stuff--
        is_thing = self.label_data[self.label_data[:, 0] ==
                                   category_list[n], 4]
        if is_thing != 1:
            [n_r, n_c] = np.nonzero(nth_label_map)
            y1 = np.amin(n_r)
            y2 = np.amax(n_r)
            x1 = np.amin(n_c)
            x2 = np.amax(n_c)
            comp_shape = nth_label_map[y1:y2, x1:x2]
            comp_context = LABEL_MAP[y1:y2, x1:x2]
            ar = np.size(comp_context, 0) / (float(np.size(comp_context, 1))
                                             + sys.float_info.epsilon)
            # -- get the comp-list
            query_shapes[iter_] = {}
            query_shapes[iter_]['comp_shape'] = comp_shape
            query_shapes[iter_]['comp_context'] = comp_context
            query_shapes[iter_]['shape_label'] = category_list[n]
            query_shapes[iter_]['ar'] = ar
            query_shapes[iter_]['bbox'] = [y1, x1, y2, x2]
            query_shapes[iter_]['dim'] = [np.size(nth_label_map, 0),
                                          np.size(nth_label_map, 1)]
            iter_ = iter_ + 1
        else:
            nth_inst_map = np.empty_like(instances)
            np.copyto(nth_inst_map, instances)
            nth_inst_map[nth_inst_map == 0] = -1
            nth_inst_map[nth_label_map == 0] = -1
            nth_inst_ids = np.unique(nth_inst_map)
            nth_inst_ids = nth_inst_ids[nth_inst_ids != -1]
            for m in range(0, len(nth_inst_ids)):
                mth_inst_map = np.empty_like(nth_inst_map)
                np.copyto(mth_inst_map, nth_inst_map)
                mth_inst_map[mth_inst_map != nth_inst_ids[m]] = -1
                mth_inst_map[mth_inst_map == nth_inst_ids[m]] = 1
                mth_inst_map[mth_inst_map == -1] = 0
                # --
                [m_r, m_c] = np.nonzero(mth_inst_map)
                y1 = np.amin(m_r)
                y2 = np.amax(m_r)
                x1 = np.amin(m_c)
                x2 = np.amax(m_c)
                comp_shape = mth_inst_map[y1:y2, x1:x2]
                comp_context = LABEL_MAP[y1:y2, x1:x2]
                ar = np.size(comp_context, 0) / (float(np.size(comp_context, 1))
                                                 + sys.float_info.epsilon)
                # -- comp_list data
                query_shapes[iter_] = {}
                query_shapes[iter_]['comp_shape'] = comp_shape
                query_shapes[iter_]['comp_context'] = comp_context
                query_shapes[iter_]['shape_label'] = category_list[n]
                query_shapes[iter_]['ar'] = ar
                query_shapes[iter_]['bbox'] = [y1, x1, y2, x2]
                query_shapes[iter_]['dim'] = [np.size(nth_label_map, 0),
                                              np.size(nth_label_map, 1)]
                iter_ = iter_ + 1
    return query_shapes
def get_exemplar_shapes(self, EXEMPLAR_MATCHES):
    exemplar_shapes = {}
    iter_ = 0
    for i in range(0, len(EXEMPLAR_MATCHES)):
        LABEL_MAP = cv2.imread(self.LABEL_PATH +
                               EXEMPLAR_MATCHES[i][0].decode('UTF-8'))
        LABEL_MAP = np.int16(LABEL_MAP[:, :, 0])
        INST_MAP = np.int32(cv2.imread(
            self.INST_PATH + EXEMPLAR_MATCHES[i][0].decode('UTF-8')))
        EXEMPLAR_IMAGE = cv2.imread(
            self.IMAGE_PATH +
            (EXEMPLAR_MATCHES[i][0]).decode('UTF-8').replace('.png', '.jpg'))
        instances = INST_MAP[:, :, 0]*10 + \
            INST_MAP[:, :, 1]*100 + \
            INST_MAP[:, :, 2]*1000
        category_list = np.unique(LABEL_MAP)
        category_list = category_list[category_list != self.IGNORE_LABEL]
        for n in range(0, len(category_list)):
            # for each semantic label map --
            nth_label_map = np.empty_like(LABEL_MAP)
            np.copyto(nth_label_map, LABEL_MAP)
            nth_label_map[nth_label_map != category_list[n]] = -1
            nth_label_map[nth_label_map == category_list[n]] = 1
            nth_label_map[nth_label_map == -1] = 0
            # ---------------------------------------------------
            # check if this category belongs to things or stuff--
            is_thing = self.label_data[self.label_data[:, 0] ==
                                       category_list[n], 4]
            if is_thing != 1:
                [n_r, n_c] = np.nonzero(nth_label_map)
                y1 = np.amin(n_r)
                y2 = np.amax(n_r)
                x1 = np.amin(n_c)
                x2 = np.amax(n_c)
                comp_shape = nth_label_map[y1:y2, x1:x2]
                comp_context = LABEL_MAP[y1:y2, x1:x2]
                comp_rgb = np.empty_like(EXEMPLAR_IMAGE[y1:y2, x1:x2, :])
                np.copyto(comp_rgb, EXEMPLAR_IMAGE[y1:y2, x1:x2, :])
                comp_rgb[:, :, 0] = comp_rgb[:, :, 0] * comp_shape
                comp_rgb[:, :, 1] = comp_rgb[:, :, 1] * comp_shape
                comp_rgb[:, :, 2] = comp_rgb[:, :, 2] * comp_shape
                ar = np.size(comp_context, 0) / (float(np.size(comp_context, 1))
                                                 + sys.float_info.epsilon)
                # -- get the comp-list
                exemplar_shapes[iter_] = {}
                exemplar_shapes[iter_]['comp_shape'] = comp_shape
                exemplar_shapes[iter_]['comp_context'] = comp_context
                exemplar_shapes[iter_]['comp_rgb'] = comp_rgb
                exemplar_shapes[iter_]['org_rgb'] = EXEMPLAR_IMAGE[y1:y2, x1:x2, :]
                exemplar_shapes[iter_]['shape_label'] = category_list[n]
                exemplar_shapes[iter_]['ar'] = ar
                exemplar_shapes[iter_]['bbox'] = [y1, x1, y2, x2]
                exemplar_shapes[iter_]['dim'] = [np.size(nth_label_map, 0),
                                                 np.size(nth_label_map, 1)]
                iter_ = iter_ + 1
            else:
                nth_inst_map = np.empty_like(instances)
                np.copyto(nth_inst_map, instances)
                nth_inst_map[nth_inst_map == 0] = -1
                nth_inst_map[nth_label_map == 0] = -1
                nth_inst_ids = np.unique(nth_inst_map)
                nth_inst_ids = nth_inst_ids[nth_inst_ids != -1]
                for m in range(0, len(nth_inst_ids)):
                    mth_inst_map = np.empty_like(nth_inst_map)
                    np.copyto(mth_inst_map, nth_inst_map)
                    mth_inst_map[mth_inst_map != nth_inst_ids[m]] = -1
                    mth_inst_map[mth_inst_map == nth_inst_ids[m]] = 1
                    mth_inst_map[mth_inst_map == -1] = 0
                    # --
                    [m_r, m_c] = np.nonzero(mth_inst_map)
                    y1 = np.amin(m_r)
                    y2 = np.amax(m_r)
                    x1 = np.amin(m_c)
                    x2 = np.amax(m_c)
                    comp_shape = mth_inst_map[y1:y2, x1:x2]
                    comp_context = LABEL_MAP[y1:y2, x1:x2]
                    comp_rgb = np.empty_like(EXEMPLAR_IMAGE[y1:y2, x1:x2, :])
                    np.copyto(comp_rgb, EXEMPLAR_IMAGE[y1:y2, x1:x2, :])
                    comp_rgb[:, :, 0] = comp_rgb[:, :, 0] * comp_shape
                    comp_rgb[:, :, 1] = comp_rgb[:, :, 1] * comp_shape
                    comp_rgb[:, :, 2] = comp_rgb[:, :, 2] * comp_shape
                    ar = np.size(comp_context, 0) / (float(np.size(comp_context, 1))
                                                     + sys.float_info.epsilon)
                    # -- comp_list data
                    exemplar_shapes[iter_] = {}
                    exemplar_shapes[iter_]['comp_shape'] = comp_shape
                    exemplar_shapes[iter_]['comp_context'] = comp_context
                    exemplar_shapes[iter_]['comp_rgb'] = comp_rgb
                    exemplar_shapes[iter_]['org_rgb'] = EXEMPLAR_IMAGE[y1:y2, x1:x2, :]
                    exemplar_shapes[iter_]['shape_label'] = category_list[n]
                    exemplar_shapes[iter_]['ar'] = ar
                    exemplar_shapes[iter_]['bbox'] = [y1, x1, y2, x2]
                    exemplar_shapes[iter_]['dim'] = [np.size(nth_label_map, 0),
                                                     np.size(nth_label_map, 1)]
                    iter_ = iter_ + 1
    return exemplar_shapes
def loadParameters(self, pD):
    try:
        np.copyto(self.mean, pD[self.instanceName + ':mean'])
        np.copyto(self.var, pD[self.instanceName + ':std'])
    except KeyError:  # the original used a bare except
        self.printError("Layer parameter(s) do not exist while loading.")
def get_matching_score(self, query_data, exemplar_data):
    # query data --
    query_shape = np.empty_like(query_data['comp_shape'])
    np.copyto(query_shape, query_data['comp_shape'])
    q_h = np.size(query_shape, 0)
    q_w = np.size(query_shape, 1)
    # NOTE: the original wrote `q_h <= 1 | q_w <= 1`, which Python parses as
    # a chained comparison around a bitwise OR; `or` is the intended test.
    if (q_h <= 1) or (q_w <= 1):
        return np.array([])

    query_shape_rs = cv2.resize(query_shape,
                                dsize=(self.WIN_SIZE, self.WIN_SIZE),
                                interpolation=cv2.INTER_NEAREST)
    query_shape_rs[query_shape_rs == 0] = -1
    query_label = query_data['shape_label']
    query_ar = query_data['ar']

    # get the relevant labels from the exemplar data
    synth_data = np.zeros((len(exemplar_data), 8), dtype='float')
    for j in range(0, len(exemplar_data)):
        jth_label = exemplar_data[j]['shape_label']
        if jth_label != query_label:
            continue
        jth_shape = np.empty_like(exemplar_data[j]['comp_shape'])
        np.copyto(jth_shape, exemplar_data[j]['comp_shape'])
        jth_h = np.size(jth_shape, 0)
        jth_w = np.size(jth_shape, 1)
        if (jth_h < (self.RES_F * q_h)) | (jth_w < (self.RES_F * q_w)):
            continue
        jth_ar = exemplar_data[j]['ar']
        ar12 = np.divide(query_ar, float(jth_ar) + sys.float_info.epsilon)
        if (ar12 < 0.5) | (ar12 > 2.0):
            continue
        jth_search_shape = cv2.resize(jth_shape,
                                      dsize=(self.WIN_SIZE, self.WIN_SIZE),
                                      interpolation=cv2.INTER_NEAREST)
        jth_search_shape[jth_search_shape == 0] = -1
        jth_score = np.divide(
            (query_shape_rs.flatten() * jth_search_shape.flatten()).sum(),
            float(np.size(query_shape_rs, 0) * np.size(query_shape_rs, 1))
            + sys.float_info.epsilon)
        synth_data[j, :] = [1, 1, np.size(query_shape_rs, 1),
                            np.size(query_shape_rs, 0), jth_score, 0, j, 1]

    synth_data = synth_data[synth_data[:, 7] == 1, :]
    if synth_data.size == 0:
        return synth_data

    # find the examples better than SHAPE_THRESH
    val_examples = synth_data[:, 4] >= self.SHAPE_THRESH
    if val_examples.sum() == 0:
        Is = np.argmax(synth_data[:, 4])
        score = np.tile(synth_data[Is, :], [self.TOP_K, 1])
        return score

    # if there are more examples
    score = synth_data[val_examples, :]
    Is = np.argsort(score[:, 4])
    rev_Is = Is[::-1]
    score = score[rev_Is, :]
    num_ex = np.minimum(np.size(score, 0), self.TOP_K)
    score = score[0:num_ex, :]
    if np.size(score, 0) < self.TOP_K:
        score = np.tile(score, [self.TOP_K, 1])
        score = score[0:self.TOP_K, :]
    return score
def runTests():
    global board
    initializeGame()

    # This board is an extreme example of COMPLETE enclosure
    print("test enclosure detection positive")
    board = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                      [1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1],
                      [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                      [0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0],
                      [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
                      [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
                      [0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
                      [0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1],
                      [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0]])
    if not isVictory():
        exit('FAIL')

    initializeGame()

    # This crazy board is an extreme example of an INCOMPLETE enclosure
    print("test enclosure detection negative")
    board = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                      [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1],
                      [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                      [0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0],
                      [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
                      [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
                      [0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0],
                      [0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 1],
                      [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0]])
    if isVictory():
        exit('FAIL')

    ### FORCE A TEST OF REPEAT
    global repeatCheckOn
    repeatCheckOn = True

    board = np.array([[0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                      [0, 2, 0, 0, 0, 1, 0, 0, 1, 3, 1],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                      [0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 1],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 1, 2, 0, 2, 1, 0, 0, 0],
                      [0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]])

    # Precedence tests
    smartMove()

    # Capture test
    print("Capture up")
    movePiece([3, 1], [2, 1])
    if board[1, 1] != 0:
        exit("FAIL")

    print("Simultaneous capture left, right, and down")
    movePiece([5, 5], [8, 5])
    if (board[8, 4] != 0) or (board[8, 6] != 0) or (board[9, 5] != 0):
        print(board)
        exit("FAIL")

    print("Capture against restricted square")
    movePiece([1, 5], [3, 5])
    if board[4, 5] != 0:
        exit("FAIL")

    print("Capture king")
    movePiece([3, 9], [2, 9])
    if not gameOver:
        exit("FAIL")

    initializeGame()
    board = np.zeros([11, 11], dtype=int)  # use this to set up an empty board
    board[5, 0] = 3  # set up a test board with only a king

    print("Move king to restricted square (this is allowed)")
    if not movePiece([5, 0], [5, 5]):
        exit("FAIL")

    np.copyto(board, startingBoard)
    storeState()

    print("Throw error when moving attacker to restricted square")
    if movePiece([10, 7], [10, 10]) or lastReturnCode != RESTRICTED_SQUARE:
        exit("FAIL")

    print("Throw error when moving through a block")
    if movePiece([10, 7], [10, 5]) or lastReturnCode != WAY_BLOCKED:
        exit("FAIL")

    print("Throw error when starting position is empty")
    if movePiece([10, 10], [10, 5]) or lastReturnCode != NO_PIECE_TO_MOVE:
        exit("FAIL")

    print("Throw error when start and end positions are the same")
    if movePiece([10, 7], [10, 7]) or lastReturnCode != PIECE_DID_NOT_MOVE:
        exit("FAIL")

    movePiece([10, 7], [10, 8])
    movePiece([10, 8], [10, 7])
    movePiece([10, 7], [10, 8])
    if repeatCheckOn:
        print("Throw error when board state has repeated too many times")
        if movePiece([10, 8], [10, 7]) or lastReturnCode != TOO_MANY_REPEATS:
            exit("FAIL")

    print("Throw error if piece moves diagonally")
    if movePiece([10, 8], [9, 5]) or lastReturnCode != NO_DIAGONALS:
        exit("FAIL")

    print("Throw error if piece moves off the board")
    if movePiece([10, 8], [11, 8]) or lastReturnCode != WAY_BLOCKED:
        exit("FAIL")

    exit("All tests PASS")
ab['var1'] = lib['ids1']
ab['var2'] = lib['strands1']
ab['var3'] = c1
ab['var4'] = lib['cuts1']
ab['var5'] = lib['rfragIdxs1']
ab['var6'] = lib['strands2']
ab['var7'] = c2
ab['var8'] = lib['cuts2']
ab['var9'] = lib['rfragIdxs2']
ab['var10'] = mapq
ab['var11'] = mapq

# Swap so that lowest chr numbers are to the left:
np.copyto(ab['var3'], c2, where=swap)
np.copyto(ab['var7'], c1, where=swap)
np.copyto(ab['var2'], lib['strands2'], where=swap)
np.copyto(ab['var6'], lib['strands1'], where=swap)
np.copyto(ab['var4'], lib['cuts2'], where=swap)
np.copyto(ab['var8'], lib['cuts1'], where=swap)
np.copyto(ab['var5'], lib['rfragIdxs2'], where=swap)
np.copyto(ab['var9'], lib['rfragIdxs1'], where=swap)

# sort on chromosomes:
# np.argsort(ab, order=['var3', 'var7'])

np.savetxt(sys.stdout, ab[filter], fmt="%s %i %s %i %i %i %s %i %i %i %i")
def _write_obs(maybe_dict_obs):
    flatdict = obs_to_dict(maybe_dict_obs)
    for k in keys:
        dst = obs_bufs[k].get_obj()
        dst_np = np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k])  # pylint: disable=W0212
        np.copyto(dst_np, flatdict[k])
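# Hedged sketch of how such a shared observation buffer might be allocated
# (hypothetical helper; names mirror the closure variables above):
import multiprocessing as mp
import numpy as np

def make_obs_buf(shape, dtype):
    ctype = np.ctypeslib.as_ctypes_type(np.dtype(dtype))
    return mp.Array(ctype, int(np.prod(shape)))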
def _create_group(self, drawing, projection, viewport, mesh):
    faces = mesh.faces
    shader = mesh.shader or (lambda face_index, winding: {})
    default_style = mesh.style or {}

    # Extend each point to a vec4, then transform to clip space.
    faces = np.dstack([faces, np.ones(faces.shape[:2])])
    faces = np.dot(faces, projection)

    # Reject trivially clipped polygons.
    xyz, w = faces[:, :, :3], faces[:, :, 3:]
    accepted = np.logical_and(np.greater(xyz, -w), np.less(xyz, +w))
    accepted = np.all(accepted, 2)  # vert is accepted if xyz are all inside
    accepted = np.any(accepted, 1)  # face is accepted if any vert is inside
    degenerate = np.less_equal(w, 0)[:, :, 0]  # vert is bad if its w <= 0
    degenerate = np.any(degenerate, 1)  # face is bad if any of its verts are bad
    accepted = np.logical_and(accepted, np.logical_not(degenerate))
    faces = np.compress(accepted, faces, axis=0)

    # Apply perspective transformation.
    xyz, w = faces[:, :, :3], faces[:, :, 3:]
    faces = xyz / w

    # Sort faces from back to front.
    face_indices = self._sort_back_to_front(faces)
    faces = faces[face_indices]

    # Apply viewport transform to X and Y.
    faces[:, :, 0:1] = (1.0 + faces[:, :, 0:1]) * viewport.width / 2
    faces[:, :, 1:2] = (1.0 - faces[:, :, 1:2]) * viewport.height / 2
    faces[:, :, 0:1] += viewport.minx
    faces[:, :, 1:2] += viewport.miny

    # Compute the winding direction of each polygon.
    windings = np.zeros(faces.shape[0])
    if faces.shape[1] >= 3:
        p0, p1, p2 = faces[:, 0, :], faces[:, 1, :], faces[:, 2, :]
        normals = np.cross(p2 - p0, p1 - p0)
        np.copyto(windings, normals[:, 2])

    group = drawing.g(**default_style)

    # Create circles.
    if mesh.circle_radius > 0:
        for face_index, face in enumerate(faces):
            style = shader(face_indices[face_index], 0)
            if style is None:
                continue
            face = np.around(face[:, :2], self.precision)
            for pt in face:
                group.add(drawing.circle(pt, mesh.circle_radius, **style))
        return group

    # Create polygons and lines.
    for face_index, face in enumerate(faces):
        style = shader(face_indices[face_index], windings[face_index])
        if style is None:
            continue
        face = np.around(face[:, :2], self.precision)
        if len(face) == 2:
            group.add(drawing.line(face[0], face[1], **style))
        else:
            group.add(drawing.polygon(face, **style))
    return group
def prepareInputsForFpga(self, inputs, cfgFile, scale, PE=-1, layerName=""):
    startTime = timeit.default_timer()
    execData = self.getOrCreateExecData(PE)

    numBatches, imgSize = inputs.shape
    # print "imgSize: %s, numBatch: %s" % (imgSize, numBatches)

    inputPtrsNeedInit = (type(execData._cpuInputs) == type(None)
                         or execData._cpuInputs.shape[0] != numBatches
                         or type(execData._fpgaInputs) == type(None)
                         or len(execData._fpgaInputs) != numBatches
                         or execData._imgSize != imgSize)

    if inputPtrsNeedInit:
        # prepare src float array
        execData._imgSize = imgSize
        execData._cpuInputs = np.ascontiguousarray(
            np.zeros(numBatches * imgSize).reshape(numBatches, imgSize),
            dtype=np.float32)

        # prepare array of ptrs to each input image
        execData._cpuInputPtrs = (POINTER(c_float) * numBatches)()
        for i in range(numBatches):
            execData._cpuInputPtrs[i] \
                = execData._cpuInputs[i].ctypes.data_as(POINTER(c_float))

        # prepare tgt short array
        execData._fpgaInputPtrs = (POINTER(c_short) * numBatches)()
        execData._fpgaInputPtrsOrig = execData._fpgaInputPtrs

        # free existing mem (if any)
        if execData._fpgaInputHandles:
            for fps in execData._fpgaInputHandles:
                for hi, h in enumerate(self._handles):
                    self._lib.xFree(h, fps[hi], True)
        execData._fpgaInputs = []
        execData._fpgaInputHandles = []

        # make new mem
        for i in range(numBatches):
            (fpgaArr, fpgaHandles) = self.makeFPGAShortArray(imgSize)
            execData._fpgaInputs.append(fpgaArr)
            execData._fpgaInputPtrs[i] \
                = execData._fpgaInputs[i].ctypes.data_as(POINTER(c_short))
            execData._fpgaInputHandles.append(fpgaHandles)
    else:
        execData._fpgaInputPtrs = execData._fpgaInputPtrsOrig

    if inputs.dtype == np.float32:
        # grab new input data
        for i in range(numBatches):
            np.copyto(execData._cpuInputs[i], inputs[i])

        # prepare data for FPGA
        actualNumFpgaInputs = self._lib.XDNNPrepareInput(
            layerName, execData._cpuInputPtrs, execData._fpgaInputPtrs,
            numBatches, imgSize, cfgFile, scale)

        if actualNumFpgaInputs < len(execData._fpgaInputPtrs):
            # quantized/packed
            truncatedArr = (POINTER(c_short) * actualNumFpgaInputs)()
            for i in range(actualNumFpgaInputs):
                truncatedArr[i] = execData._fpgaInputPtrs[i]
            execData._fpgaInputPtrs = truncatedArr
    else:
        # already prepared, just populate fields
        for i in range(numBatches):
            np.copyto(execData._fpgaInputs[i], inputs[i])

    # tell FPGA there's new data
    self.markDirty(execData._peMask)

    elapsedTime = timeit.default_timer() - startTime
    # print "PrepareInputsForFpga elapsed (%f ms):" % (elapsedTime * 1000)
    return execData._fpgaInputPtrs
Angry = [img1, img2, img3]
Disgusting = [img4, img5, img6]
Fearful = [img7, img8, img9]
Happy = [img10, img11, img12]
'''Sad = [img13, img14, img15]
Surprising = [img16, img17, img18]
Neutral = [img19, img20, img21]'''

# select the image
background = Happy[x]

'''cv2.resize(src, dsize=(0, 0), fx=scale_x, fy=scale_y,
   interpolation=cv2.INTER_LINEAR)'''
logo = cv2.imread("C:/Users/user/PycharmProjects/OpenCV/Black Layer.png")
gray_logo = cv2.cvtColor(logo, cv2.COLOR_BGR2GRAY)
_, mask_inv = cv2.threshold(gray_logo, 10, 255, cv2.THRESH_BINARY_INV)

background_height, background_width, _ = background.shape  # 900, 600, 3
logo_height, logo_width, _ = logo.shape  # 360, 313, 3
# note: x is reused below as a pixel offset
x = background_height - logo_height
y = (background_width - logo_width) // 2

roi = background[x:x + logo_height, y:y + logo_width]
roi_logo = cv2.add(logo, roi, mask=mask_inv)
result = cv2.add(roi_logo, logo)
np.copyto(roi, result)

cv2.imshow("result_background", background)
cv2.waitKey()
def test_copyto():
    a = np.arange(6, dtype='i4').reshape(2, 3)

    # Simple copy
    np.copyto(a, [[3, 1, 5], [6, 2, 1]])
    assert_equal(a, [[3, 1, 5], [6, 2, 1]])

    # Overlapping copy should work
    np.copyto(a[:, :2], a[::-1, 1::-1])
    assert_equal(a, [[2, 6, 5], [1, 3, 1]])

    # Defaults to 'same_kind' casting
    assert_raises(TypeError, np.copyto, a, 1.5)

    # Force a copy with 'unsafe' casting, truncating 1.5 to 1
    np.copyto(a, 1.5, casting='unsafe')
    assert_equal(a, 1)

    # Copying with a mask
    np.copyto(a, 3, where=[True, False, True])
    assert_equal(a, [[3, 1, 3], [3, 1, 3]])

    # Casting rule still applies with a mask
    assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])

    # Lists of integer 0's and 1's is ok too
    np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
    assert_equal(a, [[3, 4, 4], [4, 1, 3]])

    # Overlapping copy with mask should work
    np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
    assert_equal(a, [[3, 4, 4], [4, 3, 3]])

    # 'dst' must be an array
    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
data = np.reshape(codebook[code], weights.shape)
np.copyto(weights, data)

nz_num = np.fromfile(fin, dtype=np.uint32, count=len(layers))
for idx, layer in enumerate(layers):
    print("Reconstruct layer", layer)
    print("Total Non-zero number:", nz_num[idx])

    if 'conv' in layer:
        bits = 8
    else:
        bits = 4
    codebook_size = 2 ** bits
    codebook = np.fromfile(fin, dtype=np.float32, count=codebook_size)
    bias = np.fromfile(fin, dtype=np.float32,
                       count=net.params[layer][1].data.size)
    np.copyto(net.params[layer][1].data, bias)

    # integer division (//) preserves the original Python 2 behavior
    spm_stream = np.fromfile(
        fin, dtype=np.uint8, count=(nz_num[idx] - 1) // (8 // bits) + 1)
    ind_stream = np.fromfile(
        fin, dtype=np.uint8, count=(nz_num[idx] - 1) // 2 + 1)

    binary_to_net(net.params[layer][0].data, spm_stream, ind_stream,
                  codebook, nz_num[idx])

net.save(target)
def copyto(
        dst: ndpoly,
        src: PolyLike,
        casting: str = "same_kind",
        where: numpy.typing.ArrayLike = True,
) -> None:
    """
    Copy values from one array to another, broadcasting as necessary.

    Raises a TypeError if the `casting` rule is violated, and if `where` is
    provided, it selects which elements to copy.

    Args:
        dst:
            The array into which values are copied.
        src:
            The array from which values are copied.
        casting:
            Controls what kind of data casting may occur when copying.

            * 'no' means the data types should not be cast at all.
            * 'equiv' means only byte-order changes are allowed.
            * 'safe' means only casts which can preserve values are allowed.
            * 'same_kind' means only safe casts or casts within a kind,
              like float64 to float32, are allowed.
            * 'unsafe' means any data conversions may be done.
        where:
            A boolean array which is broadcasted to match the dimensions of
            `dst`, and selects elements to copy from `src` to `dst` wherever
            it contains the value True.

    Examples:
        >>> q0 = numpoly.variable()
        >>> poly1 = numpoly.polynomial([1, q0**2, q0])
        >>> poly2 = numpoly.polynomial([2, q0, 3])
        >>> numpoly.copyto(poly1, poly2, where=[True, False, True])
        >>> poly1
        polynomial([2, q0**2, 3])
        >>> numpoly.copyto(poly1, poly2)
        >>> poly1
        polynomial([2, q0, 3])

    """
    logger = logging.getLogger(__name__)
    src = numpoly.aspolynomial(src)
    assert isinstance(dst, numpy.ndarray)
    if not isinstance(dst, numpoly.ndpoly):
        if dst.dtype.names is None:
            if src.isconstant():
                return numpy.copyto(
                    dst, src.tonumpy(), casting=casting, where=where)
            raise ValueError(f"Could not convert src {src} to dst {dst}")
        if casting != "unsafe":
            raise ValueError(
                f"could not safely convert src {src} to dst {dst}")
        logger.warning("Copying ndpoly input into ndarray")
        logger.warning("You might need to cast `numpoly.polynomial(dst)`.")
        logger.warning("Indeterminant names might be lost.")
        dst_keys = dst.dtype.names
        dst_ = dst
    else:
        dst_keys = dst.keys
        src, _ = numpoly.align_indeterminants(src, dst)
        dst_ = dst.values

    missing_keys = set(src.keys).difference(dst_keys)
    if missing_keys:
        raise ValueError(f"memory layouts are incompatible: {missing_keys}")

    for key in dst_keys:
        if key in src.keys:
            numpy.copyto(dst_[key], src.values[key],
                         casting=casting, where=where)
        else:
            numpy.copyto(dst_[key],
                         numpy.array(False, dtype=dst_[key].dtype),
                         casting=casting, where=where)
def test_copyto_permut():
    # test explicit overflow case
    pad = 500
    l = [True] * pad + [True, True, True, True]
    r = np.zeros(len(l) - pad)
    d = np.ones(len(l) - pad)
    mask = np.array(l)[pad:]
    np.copyto(r, d, where=mask[::-1])

    # test all permutation of possible masks, 9 should be sufficient for
    # current 4 byte unrolled code
    power = 9
    d = np.ones(power)
    for i in range(2 ** power):
        r = np.zeros(power)
        l = [(i & x) != 0 for x in range(power)]
        mask = np.array(l)
        np.copyto(r, d, where=mask)
        assert_array_equal(r == 1, l)
        assert_equal(r.sum(), sum(l))

        r = np.zeros(power)
        np.copyto(r, d, where=mask[::-1])
        assert_array_equal(r == 1, l[::-1])
        assert_equal(r.sum(), sum(l))

        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::2])
        assert_array_equal(r[::2] == 1, l[::2])
        assert_equal(r[::2].sum(), sum(l[::2]))

        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::-2])
        assert_array_equal(r[::2] == 1, l[::-2])
        assert_equal(r[::2].sum(), sum(l[::-2]))

        for c in [0xFF, 0x7F, 0x02, 0x10]:
            r = np.zeros(power)
            mask = np.array(l)
            imask = np.array(l).view(np.uint8)
            imask[mask != 0] = c
            np.copyto(r, d, where=mask)
            assert_array_equal(r == 1, l)
            assert_equal(r.sum(), sum(l))

    r = np.zeros(power)
    np.copyto(r, d, where=True)
    assert_equal(r.sum(), r.size)
    r = np.ones(power)
    d = np.zeros(power)
    np.copyto(r, d, where=False)
    assert_equal(r.sum(), r.size)
def add_expert(self, obs, action, reward, next_obs, done, done_no_max):
    for a in range(4):
        self.k += 1
        np.copyto(self.obses[self.idx], obs)
        np.copyto(self.actions[self.idx], a)
        np.copyto(self.rewards[self.idx], reward)
        np.copyto(self.next_obses[self.idx], next_obs)
        np.copyto(self.not_dones[self.idx], not done)
        np.copyto(self.not_dones_no_max[self.idx], not done_no_max)

        self.idx = (self.idx + 1) % self.capacity
        self.full = self.full or self.idx == 0
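# Hedged sketch of a matching sample step for this ring buffer (hypothetical
# helper; field names follow add_expert above):
def sample(self, batch_size):
    max_idx = self.capacity if self.full else self.idx
    idxs = np.random.randint(0, max_idx, size=batch_size)
    return (self.obses[idxs], self.actions[idxs], self.rewards[idxs],
            self.next_obses[idxs], self.not_dones[idxs])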
def save(self, ndarray: np.ndarray) -> None:
    assert isinstance(ndarray, np.ndarray)
    dst = self.arr.get_obj()
    dst_np = np.frombuffer(dst, dtype=self.dtype).reshape(self.shape)
    np.copyto(dst_np, ndarray)
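# Hedged sketch of the matching read side (hypothetical `load` method),
# assuming the same ctypes-backed array, dtype and shape used by `save`:
def load(self) -> np.ndarray:
    src = self.arr.get_obj()
    return np.frombuffer(src, dtype=self.dtype).reshape(self.shape).copy()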
def setStateSpace(self, _A_hat, _K, _H, _C, _D):
    assert _A_hat.shape == (self.n_states, self.n_states)
    assert _K.shape == (self.n_states, self.n_outputs)
    assert _H.shape == (self.n_states, self.n_inputs)
    assert _C.shape == (self.n_outputs, self.n_states)
    assert _D.shape == (self.n_outputs, self.n_inputs)

    np.copyto(self.A_hat, _A_hat)
    np.copyto(self.K, _K)
    np.copyto(self.H, _H)
    np.copyto(self.C, _C)
    np.copyto(self.D, _D)

    # State Transition Matrix
    np.copyto(self.state_trans, expm(self.A_hat * self.d_time))
    np.copyto(self.st_I_dt,
              (np.eye(self.n_states) + self.state_trans) * self.d_time)
def load_data(self, data):
    np.copyto(self.data, data)
def config_table(maxf, n, sort=True, fun=None, index_range=None, minf=None):
    """
    Tabulate a list of configurations in `n` indices with
    fun(config) <= `maxf`.

    Parameters
    ----------
    maxf : scalar
        The maximum excitation.
    n : int
        The number of indices.
    sort : boolean
        If True (default), the configuration table is sorted by the
        excitation function.
    fun : function, optional
        The excitation function. If None (default), the sum of indices
        is used.
    index_range : array_like
        The dimension of each index. The default is infinite.
    minf : scalar
        The minimum excitation. The default is negative infinity.
        Allowed configurations are *strictly greater than* `minf`.

    Returns
    -------
    ndarray

    Notes
    -----
    Custom excitation functions `fun` take an argument ``configs`` which is
    a (..., `n`) array_like containing an array of configuration index
    vectors. It must return a (...)-shaped array with the excitation value.
    The excitation value must be monotonically increasing with an increment
    of one or more configuration indices.
    """
    if fun is None:
        fun = lambda x: np.sum(x, axis=-1)  # sum over last axis
    if index_range is None:
        index_range = [np.inf for i in range(n)]  # Maximum index values
    if minf is None:
        minf = -np.inf  # Negative infinity

    #######################################################
    # Loop once to count how many configurations there are.
    # Then repeat and actually record them.
    #
    nconfig = 0
    for record in [False, True]:
        if record:
            table = np.zeros((nconfig, n), dtype=np.int32)

        config = np.zeros((n,), dtype=np.uint32)
        nconfig = 0

        if fun(config) > maxf:
            # The [0 0 0 ... ] configuration is already
            # greater than the maximum excitation. Return an empty table.
            return np.zeros((0, n))
        # Otherwise, continue
        if fun(config) > minf:  # Strictly greater than minf !
            nconfig += 1  # Count [0 0 0 0 ...]
            if record:
                np.copyto(table[0, :], config)

        while True:
            #
            # Attempt to increment the current configuration `config`
            #
            next_config = config.copy()
            found_valid = False

            for j in range(n):
                # Try to increment index j, starting from the left.
                next_config[j] += 1
                if next_config[j] < index_range[j] and \
                        fun(next_config) <= maxf:
                    # This config is okay.
                    if fun(next_config) > minf:  # Strictly greater than minf!
                        nconfig += 1
                    config = next_config
                    found_valid = True
                    break
                else:
                    # next_config is out-of-bounds
                    # Set this index to zero and move on to the next one
                    next_config[j] = 0

            if not found_valid:
                # We were not able to find a new valid configuration
                break
            elif found_valid and record and fun(next_config) > minf:
                np.copyto(table[nconfig - 1, :], next_config)

    if sort:
        # First sort table by configuration indices
        for j in range(n - 1, -1, -1):
            I = np.argsort(-table[:, j], kind='stable')
            table = table[I, :]  # descending sort
        # Then sort by the excitation number of each configuration
        # This will leave the final table sorted by excitation first,
        # and then by each column left-to-right
        I = np.argsort(fun(table), kind='stable')
        table = table[I, :]  # Sort table by the excitation number

    return table
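# Usage sketch (illustrative): every 3-index configuration whose index sum
# is at most 2; with the default excitation function the table holds
# [0 0 0], the three permutations of [1 0 0], the three of [2 0 0], and the
# three of [1 1 0] -- 10 configurations, sorted by total excitation.
table = config_table(2, 3)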
def backward_pass(self, activate_second_order_dynamics=0):
    ######### defining local functions & variables for faster access ########
    partials_list = self.partials_list
    k = np.copy(self.k)
    K = np.copy(self.K)
    V_x = np.copy(self.V_x)
    V_xx = np.copy(self.V_xx)
    ##########################################################################

    V_x[self.N - 1] = self.l_x_f(self.X_p[self.N - 1])
    np.copyto(V_xx[self.N - 1], 2 * self.Q_final)

    # initialize before forward pass
    del_J_alpha = 0

    for t in range(self.N - 1, -1, -1):
        if t > 0:
            Q_x, Q_u, Q_xx, Q_uu, Q_ux = partials_list(
                self.X_p[t - 1], self.U_p[t], V_x[t], V_xx[t],
                activate_second_order_dynamics)
        elif t == 0:
            Q_x, Q_u, Q_xx, Q_uu, Q_ux = partials_list(
                self.X_p_0, self.U_p[0], V_x[0], V_xx[0],
                activate_second_order_dynamics)

        try:
            # If a matrix cannot be positive-definite, that means it cannot
            # be cholesky decomposed
            np.linalg.cholesky(Q_uu)
        except np.linalg.LinAlgError:
            print("FAILED! Q_uu is not Positive definite at t=", t)
            b_pass_success_flag = 0
            # If Q_uu is not positive definite, revert to the earlier values
            np.copyto(k, self.k)
            np.copyto(K, self.K)
            np.copyto(V_x, self.V_x)
            np.copyto(V_xx, self.V_xx)
            break
        else:
            b_pass_success_flag = 1
            # control-limited as follows
            k[t] = -(np.linalg.inv(Q_uu) @ Q_u)
            K[t] = -(np.linalg.inv(Q_uu) @ Q_ux)
            del_J_alpha += -self.alpha * ((k[t].T) @ Q_u) - \
                0.5 * self.alpha ** 2 * ((k[t].T) @ (Q_uu @ k[t]))

            if t > 0:
                V_x[t - 1] = Q_x + (K[t].T) @ (Q_uu @ k[t]) + \
                    ((K[t].T) @ Q_u) + ((Q_ux.T) @ k[t])
                V_xx[t - 1] = Q_xx + ((K[t].T) @ (Q_uu @ K[t])) + \
                    ((K[t].T) @ Q_ux) + ((Q_ux.T) @ K[t])

    ######################## Update the new gains ############################
    np.copyto(self.k, k)
    np.copyto(self.K, K)
    np.copyto(self.V_x, V_x)
    np.copyto(self.V_xx, V_xx)
    ##########################################################################
    self.count += 1

    return b_pass_success_flag, del_J_alpha
def values(self, corr_plus, corr_cross, snrv, psd,
           indices, template_plus, template_cross,
           u_vals, hplus_cross_corr, hpnorm, hcnorm):
    """ Calculate the chisq at points given by indices.

    Returns
    -------
    chisq: Array
        Chisq values, one for each sample index

    chisq_dof: Array
        Number of statistical degrees of freedom for the chisq test
        in the given template
    """
    if self.do:
        num_above = len(indices)
        if self.snr_threshold:
            above = abs(snrv) > self.snr_threshold
            num_above = above.sum()
            logging.info('%s above chisq activation threshold' % num_above)
            above_indices = indices[above]
            above_snrv = snrv[above]
            u_vals = u_vals[above]
            rchisq = numpy.zeros(len(indices), dtype=numpy.float32)
            dof = -100
        else:
            above_indices = indices
            above_snrv = snrv

        if num_above > 0:
            chisq = []
            curr_tmplt_mult_fac = 0.
            curr_corr_mult_fac = 0.
            if self.template_mem is None or \
                    len(self.template_mem) != len(template_plus):
                self.template_mem = zeros(
                    len(template_plus),
                    dtype=complex_same_precision_as(corr_plus))
            if self.corr_mem is None or \
                    len(self.corr_mem) != len(corr_plus):
                self.corr_mem = zeros(
                    len(corr_plus),
                    dtype=complex_same_precision_as(corr_plus))

            tmplt_data = template_cross.data
            corr_data = corr_cross.data
            numpy.copyto(self.template_mem.data, template_cross.data)
            numpy.copyto(self.corr_mem.data, corr_cross.data)
            template_cross._data = self.template_mem.data
            corr_cross._data = self.corr_mem.data

            for lidx, index in enumerate(above_indices):
                above_local_indices = numpy.array([index])
                above_local_snr = numpy.array([above_snrv[lidx]])
                local_u_val = u_vals[lidx]
                # Construct template from _plus and _cross
                # Note that this modifies in place, so we store that and
                # revert on the next pass.
                template = template_cross.multiply_and_add(
                    template_plus, local_u_val - curr_tmplt_mult_fac)
                curr_tmplt_mult_fac = local_u_val

                template.f_lower = template_plus.f_lower
                template.params = template_plus.params
                # Construct the corr vector
                norm_fac = local_u_val * local_u_val + 1
                norm_fac += 2 * local_u_val * hplus_cross_corr
                norm_fac = hcnorm / (norm_fac**0.5)
                hp_fac = local_u_val * hpnorm / hcnorm
                corr = corr_cross.multiply_and_add(
                    corr_plus, hp_fac - curr_corr_mult_fac)
                curr_corr_mult_fac = hp_fac

                bins = self.calculate_chisq_bins(template, psd)
                dof = (len(bins) - 1) * 2 - 2
                curr_chisq = power_chisq_at_points_from_precomputed(
                    corr, above_local_snr / norm_fac, norm_fac,
                    bins, above_local_indices)
                chisq.append(curr_chisq[0])

            chisq = numpy.array(chisq)
            # Must reset corr and template to original values!
            template_cross._data = tmplt_data
            corr_cross._data = corr_data

        if self.snr_threshold:
            if num_above > 0:
                rchisq[above] = chisq
        else:
            rchisq = chisq

        return rchisq, numpy.repeat(dof, len(indices))
    else:
        return None, None
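# Hedged follow-up sketch (illustrative, not part of the original class): the
# chisq and dof returned above are commonly combined with the matched-filter
# SNR into a re-weighted statistic; the form below is the widely used
# "newsnr" re-weighting, shown here only as an example consumer.
import numpy

def reweighted_snr_sketch(snr, chisq, dof):
    rchisq = chisq / dof
    # Leave the SNR untouched when the reduced chisq is consistent with
    # noise (<= 1); otherwise suppress it smoothly.
    return numpy.where(rchisq > 1.0,
                       snr / ((1.0 + rchisq ** 3) / 2.0) ** (1.0 / 6.0),
                       snr)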
"nullval": nullval, "t_Q": t_Q, "p_up": p.copy(), "v": np.zeros([nchar]), "tmp": np.zeros([nchar + 1]), "motherRow": np.zeros([nchar + 1]), "childlist": childlist } var["nodelist-up"] = var["nodelist"].copy() tip_states = None pi = "Fitzjohn" np.copyto(var["nodelist"], var["nodelistOrig"]) var["root_priors"].fill(1.0) import copy var1 = copy.deepcopy(var) preallocated_arrays = var cProfile.run("hrm_multipass(tree, chars, Q, 2, preallocated_arrays=var)") Qtype = "Simple" Qparams = np.array([0.0, 1.0, 0.00, 1.0, 1.0, 5.5, 1.0, 5.5]) pi = "Fitzjohn" l_single = discrete.create_likelihood_function_hrm_mk(tree=tree, chars=chars,
def run_game(self, epsilon, state, run_type=0, pareto_filter=None, default_action=-1):
    """Run a game."""
    # reset game for the next epoch
    gamma = 0.1  # since immediate rewards are more important, keep gamma low
    steps = 0
    action_def = 0
    action_att = 0
    reward_sum = 0
    vector_reward_sum = np.zeros(3, dtype=int)
    scalar_att = state.config.scalarize_att
    att_mag = np.sqrt(np.einsum('i,i', scalar_att, scalar_att))
    nn_input_old = np.zeros(state.size_graph + 2, dtype=int)  # +2 for the game points

    # run_type == 0 (RANDOM) means random defender actions
    if run_type == 0:
        epsilon = 1

    # log variables
    self.log_object.chosen_action = ""
    check_one = np.zeros(3, dtype=float)
    check_two = np.zeros(3, dtype=float)
    check_three = np.zeros(3, dtype=float)

    # make sure that there is a reward matrix
    if isinstance(state, State) and not isinstance(state, ChaosState):
        state.reset_reward_matrix()

    # run the game
    while state.get_points(True) > 0 and state.get_points(
            False) > 0 and steps < 200:

        # find the Q-values for the state-action pairs
        q_table = self.model.predict(state.nn_input.reshape(
            1, state.nn_input.size), batch_size=1)

        # guess an attack action
        # determine the valid attack actions
        num_moves = state.size_graph + 1
        indices = np.arange(num_moves)
        indices = indices[state.actions_att]

        # assume a random defense action
        assumed_def = state.size_graph
        for j in range(100):
            assumed_def = np.random.randint(0, state.size_graph)
            if state.actions_pareto_def[assumed_def] == 1:
                break

        # determine the attacker reward for the assumed defense action
        rew_att = np.zeros((num_moves, 3), dtype=int)
        if isinstance(state, State):
            if isinstance(state, ChaosState):
                np.copyto(
                    rew_att,
                    state.reward_matrix[assumed_def *
                                        num_moves:(assumed_def + 1) *
                                        num_moves])
            else:
                np.copyto(rew_att, state.reward_matrix[assumed_def, :, :])

        # calculate the cosine of the angle between the attacker scalarization and the rewards
        cosine = np.zeros(len(indices), dtype=float)
        for i in range(len(indices)):
            cosine[i] = np.absolute(
                np.dot(scalar_att, rew_att[indices[i]]) /
                (att_mag * np.sqrt(
                    np.einsum('i,i', rew_att[indices[i]],
                              rew_att[indices[i]]))))

        # choose the strategy closest to the attacker scalarization
        action_att = indices[np.argmax(cosine)]

        # determine the valid defense actions based on the model and algorithm
        if isinstance(state, ChaosState):
            # determine the valid moves by using the chaos state method
            state.scalarized_attack_actions(action_att, run_type)
        else:
            if run_type == 2:
                # use the precalculated pareto front for the first move
                if steps == 0 and pareto_filter is not None:
                    state.actions_pareto_def = pareto_filter
                else:
                    state.pareto_defense_actions()
            else:
                # use the entire set
                state.actions_pareto_def = state.actions_def

        # choose an actual attack action
        if np.random.rand() < (0.45 if run_type == 4 else 0.3):
            action_att = indices[np.random.randint(0, len(indices))]

        # choose a defense action
        if default_action >= 0:
            # a default action assigned means we just want to run a single action once
            action_def = default_action
            steps = 200
        elif np.random.rand() < epsilon:
            # random action
            for j in range(100):
                action_def = np.random.randint(0, state.size_graph)
                if state.actions_pareto_def[action_def] == 1:
                    break
        else:
            # from Q(s,a) values
            action_def = np.argmax(
                np.multiply(state.actions_pareto_def,
                            q_table[0] - min(q_table[0])))

        # Take actions, observe new state
        np.copyto(nn_input_old, state.nn_input)
        state.make_move(action_att, action_def)

        # Update the score
        score_now = state.get_score(True)
score_old = state.get_score(False) score = np.subtract(score_now, [0, 0, score_old[2]]) vector_reward_sum += score np.copyto(check_one, score) # calculate the reward # goal_scalarization = np.array([5, 5, 0], dtype=np.int) score = np.subtract(score, self.offset) np.copyto(check_two, score) score = np.divide(score * 100, self.normalizer) reward = np.dot(state.config.scalarization, score) / np.sum( state.config.scalarization) # reward = np.dot(goal_scalarization, score) / np.sum(goal_scalarization) np.copyto(check_three, score) # Get max_Q(S',a) if run_type > 0: q_table_new_state = self.model.predict(state.nn_input.reshape( 1, state.nn_input.size), batch_size=1) maxQ = np.max(q_table_new_state) # update the q_table update = (reward + (gamma * maxQ)) q_table[0][action_def] = update self.model.fit(nn_input_old.reshape(1, state.nn_input.size), q_table, batch_size=1, epochs=1, verbose=0) # move to the next state reward_sum += reward steps += 1 self.log_object.chosen_action += "{0}-{1}|".format( action_att, action_def) # output additional data # if default_action == -1: # # get the attack and defense type # type_of_att = -1 # type_of_def = -1 # if action_att != state.size_graph: # type_of_att = int(action_att % state.size_graph_cols < state.size_graph_col1) # if action_def != state.size_graph: # type_of_def = int(action_att % state.size_graph_cols < state.size_graph_col1) # # output the check one, two and three information # self.log_object.output_string2 += ("{0}) {1} {2} : {3} {4} : {5} {6} {7} \n".format( # steps, action_att, action_def, type_of_att, type_of_def, # check_one.astype(int), check_three.astype(int), int(reward))) # self.log_object.output_string2 += ("{0}) {1} : {2} {3} {4} {5} \n".format( # steps, action_def, check_one.astype(int), check_two.astype(int), # check_three.astype(int), int(reward))) # # output the q_table information # self.log_object.output_string2 += ("{0}) {1} : {2!r} \n".format( # steps, action_def, q_table.astype(int).tolist())) # q_table = self.model.predict(nn_input_old.reshape(1, state.nn_input.size), batch_size=1) # self.log_object.output_string2 += ("{0}) {1} : {2!r} \n".format( # steps, action_def, q_table.astype(int).tolist())) # update the log object self.log_object.reward_sum = reward_sum self.log_object.vector_reward_sum = vector_reward_sum self.log_object.step_count = steps
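# Hedged training-loop sketch (illustrative; `agent` and `make_state` are
# hypothetical stand-ins, not names from the original source):
def train_sketch(agent, make_state, epochs=100):
    epsilon = 1.0
    for _ in range(epochs):
        # run_type=1 uses the learned Q-values rather than purely random defense.
        agent.run_game(epsilon, make_state(), run_type=1)
        # Decay exploration toward greedy play as training progresses.
        epsilon = max(0.1, epsilon * 0.95)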
def power_iteration(lin_op, n_points=None, b_hat_0=None, max_iter=1000,
                    tol=1e-7, random_state=None):
    """Estimate dominant eigenvalue of linear operator A.

    Parameters
    ----------
    lin_op : callable or array
        Linear operator from which we estimate the largest eigenvalue.
    n_points : int
        Input dimension of the linear operator `lin_op`.
    b_hat_0 : array, shape (n_points, )
        Init vector. The estimated eigen-vector is stored inplace in `b_hat_0`
        to allow warm start of future calls of this function with the same
        variable.
    max_iter : int
        Maximum number of power iterations.
    tol : float
        Relative tolerance on the eigenvalue estimate used as the stopping
        criterion.
    random_state : int, RandomState instance or None
        Seed or generator used to draw the initial vector when `b_hat_0`
        is not given.

    Returns
    -------
    mu_hat : float
        The largest eigenvalue
    """
    if hasattr(lin_op, 'dot'):
        n_points = lin_op.shape[1]
        lin_op = lin_op.dot
    elif callable(lin_op):
        msg = ("power_iteration requires the n_points argument when lin_op "
               "is callable")
        assert n_points is not None, msg
    else:
        raise ValueError("lin_op should be a callable or a ndarray")

    rng = check_random_state(random_state)
    if b_hat_0 is None:
        b_hat = rng.rand(n_points)
    else:
        b_hat = b_hat_0

    mu_hat = np.nan
    for ii in range(max_iter):
        b_hat = lin_op(b_hat)
        norm = np.linalg.norm(b_hat)
        if norm == 0:
            return 0
        b_hat /= norm
        fb_hat = lin_op(b_hat)
        mu_old = mu_hat
        mu_hat = np.dot(b_hat, fb_hat)
        # Note: we might exit the loop before b_hat converges, since we care
        # only about mu_hat converging. The absolute value guards against a
        # spurious early exit when the estimate momentarily decreases.
        if abs(mu_hat - mu_old) / abs(mu_old) < tol:
            break
    assert not np.isnan(mu_hat)

    if b_hat_0 is not None:
        # copy inplace into b_hat_0 for next call to power_iteration
        np.copyto(b_hat_0, b_hat)

    return mu_hat
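# Hedged usage sketch (illustrative, not from the original source; assumes
# numpy as np and a sklearn-style check_random_state, as the code above does):
def _demo_power_iteration():
    rng = np.random.RandomState(0)
    A = rng.randn(50, 50)
    A = A @ A.T          # symmetric PSD, so the dominant eigenvalue is the largest
    b0 = rng.rand(50)    # warm-start buffer, updated in place by power_iteration
    mu = power_iteration(A, b_hat_0=b0, tol=1e-10)
    # mu should agree with np.linalg.eigvalsh(A)[-1] to roughly the tolerance.
    return mu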