def modifyObject(Obj, m):
    r = randint(0, 5)
    if r < 4:
        modObj = np.rot90(Obj[0], r), np.rot90(Obj[1], r), np.rot90(Obj[2], r)
    elif r == 4:
        modObj = np.flipud(Obj[0]), np.flipud(Obj[1]), np.flipud(Obj[2])
    else:
        modObj = np.fliplr(Obj[0]), np.fliplr(Obj[1]), np.fliplr(Obj[2])
    h, w = (modObj[0].T)[0].shape
    xf = w / 2
    xi = xf - w
    yf = h / 2
    yi = yf - h
    x0, y0 = m
    xi += x0
    xf += x0
    yi += y0
    yf += y0
    # return modObj, h, w
    return modObj, xi, xf, yi, yf
def convert_to_Q_no_geometry_correction(self):
    '''
    No geometry correction
    Will convert the tof axis into a Q axis using the Q range specified
    '''
    _const = 4. * math.pi * constants.mn * self.oData.active_data.dMD / constants.h
    theta = self.oData.active_data.theta
    _q_axis = 1e-10 * _const * math.sin(theta) / (self.oData.active_data.tof_axis * 1e-6)
    nbr_q = len(_q_axis)

    [nbr_pixel, nbr_tof] = np.shape(self.scaled_normalized_data)
    q_axis_2d = np.zeros((nbr_pixel, nbr_q))
    for p in range(nbr_pixel):
        q_axis_2d[p, :] = _q_axis

    q_axis_2d = np.fliplr(q_axis_2d)
    scaled_normalized_data_reverse = np.fliplr(self.scaled_normalized_data)
    scaled_normalized_data_error_reverse = np.fliplr(self.scaled_normalized_data_error)

    self.q_axis = q_axis_2d
    self.scaled_normalized_data_reverse = scaled_normalized_data_reverse
    self.scaled_normalized_data_error_reverse = scaled_normalized_data_error_reverse

    self.logbook('--> Without geometry correction --- DONE', False)
def get_output(self, idx):
    img_id = idx // self.pertnum
    pert_id = idx % self.pertnum
    rot_id = pert_id % self.param['rotate']
    off_id = pert_id // self.param['rotate']
    [h, w] = self.output[img_id].shape
    [dy, dx] = self.get_offset(h, w, off_id)
    dy += self.param['mrgsize']
    dx += self.param['mrgsize']
    res = self.output[img_id][dy:dy + self.param['outsize'],
                              dx:dx + self.param['outsize']]
    # res = np.rot90(res)  # rotate 90
    if rot_id == 1:
        res = np.fliplr(res)
    elif rot_id == 2:
        res = np.flipud(res).T
    elif rot_id == 3:
        res = res.T
    elif rot_id == 4:
        res = np.fliplr(res).T
    elif rot_id == 5:
        res = np.flipud(res)
    elif rot_id == 6:
        res = np.rot90(res, 2)
    elif rot_id == 7:
        res = np.rot90(res, 2).T
    return res
def paduavals2coefs(f):
    useFFTwhenNisMoreThan = 100
    m = len(f)
    n = int(round(-1.5 + np.sqrt(.25 + 2 * m)))
    x = padua_points(n)
    idx = _find_m(n)
    w = 0 * x[0] + 1. / (n * (n + 1))
    idx1 = np.all(np.abs(x) == 1, axis=0)
    w[idx1] = .5 * w[idx1]
    idx2 = np.all(np.abs(x) != 1, axis=0)
    w[idx2] = 2 * w[idx2]

    G = np.zeros(idx.max() + 1)
    G[idx] = 4 * w * f

    if (n < useFFTwhenNisMoreThan):
        t1 = np.r_[0:n + 1].reshape(-1, 1)
        Tn1 = np.cos(t1 * t1.T * np.pi / n)
        t2 = np.r_[0:n + 2].reshape(-1, 1)
        Tn2 = np.cos(t2 * t2.T * np.pi / (n + 1))
        C = np.dot(Tn2, np.dot(G, Tn1))
    else:
        # dct = @(c) chebtech2.coeffs2vals(c);
        C = np.rot90(dct(dct(G.T).T))  # , axis=1)

    C[0] = .5 * C[0]
    C[:, 1] = .5 * C[:, 1]
    C[0, -1] = .5 * C[0, -1]
    # numpy arrays do not support `del C[-1]`; drop the last row explicitly
    C = np.delete(C, -1, axis=0)

    # Take upper-left triangular part:
    return np.fliplr(np.triu(np.fliplr(C)))
def rotate_data(bg, overlay, slices_list, axis_name, shape):
    # Rotate the data as required
    # Return the rotated data, and an updated slice list if necessary
    if axis_name == 'axial':
        # Align so that right is right
        overlay = np.rot90(overlay)
        overlay = np.fliplr(overlay)
        bg = np.rot90(bg)
        bg = np.fliplr(bg)
    elif axis_name == 'coronal':
        overlay = np.rot90(overlay)
        bg = np.rot90(bg)
        overlay = np.flipud(np.swapaxes(overlay, 0, 2))
        bg = np.flipud(np.swapaxes(bg, 0, 2))
        slices_list[1] = [shape - n - 3 for n in slices_list[1]]
    elif axis_name == 'sagittal':
        overlay = np.flipud(np.swapaxes(overlay, 0, 2))
        bg = np.flipud(np.swapaxes(bg, 0, 2))
    else:
        print '\n************************'
        print 'ERROR: data could not be rotated\n'
        parser.print_help()
        sys.exit()

    return bg, overlay, slices_list
def get_image_quadrants(IM, reorient=False):
    """
    Given an image (m, n) return its 4 quadrants Q0, Q1, Q2, Q3
    as defined in abel.hansenlaw.iabel_hansenlaw

    Parameters:
      - IM: 1D or 2D array
      - reorient: reorient image as required by abel.hansenlaw.iabel_hansenlaw
    """
    IM = np.atleast_2d(IM)

    n, m = IM.shape

    n_c = n // 2 + n % 2
    m_c = m // 2 + m % 2

    # define 4 quadrants of the image
    # see definition in abel.hansenlaw.iabel_hansenlaw
    Q1 = IM[:n_c, :m_c]
    Q2 = IM[-n_c:, :m_c]
    Q0 = IM[:n_c, -m_c:]
    Q3 = IM[-n_c:, -m_c:]

    if reorient:
        Q1 = np.fliplr(Q1)
        Q3 = np.flipud(Q3)
        Q2 = np.fliplr(np.flipud(Q2))

    return Q0, Q1, Q2, Q3
def put_image_quadrants(Q, odd_size=True):
    """
    Reassemble image from 4 quadrants Q = (Q0, Q1, Q2, Q3)
    The reverse process to get_image_quadrants()
    Qi defined in abel.hansenlaw.iabel_hansenlaw

    Parameters:
      - Q: tuple of numpy array quadrants
      - odd_size: boolean, whether the final image has odd or even pixel size
        odd size requires trimming 1 row from Q1, Q0, and 1 column from Q1, Q2

    Returns:
      - rows x cols numpy array - the reassembled image
    """
    if not odd_size:
        Top = np.concatenate((np.fliplr(Q[1]), Q[0]), axis=1)
        Bottom = np.flipud(np.concatenate((np.fliplr(Q[2]), Q[3]), axis=1))
    else:
        # odd size image: remove extra row/column added in get_image_quadrants()
        Top = np.concatenate((np.fliplr(Q[1][:-1, :-1]), Q[0][:-1, :]), axis=1)
        Bottom = np.flipud(np.concatenate((np.fliplr(Q[2][:, :-1]), Q[3]), axis=1))

    IM = np.concatenate((Top, Bottom), axis=0)

    return IM
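# Hedged round-trip sketch (illustrative, not from the original source): for an
# even-sized image, get_image_quadrants(..., reorient=True) followed by
# put_image_quadrants(..., odd_size=False) reproduces the input exactly.
import numpy as np

_IM_check = np.arange(16.).reshape(4, 4)
_Q_check = get_image_quadrants(_IM_check, reorient=True)
assert np.array_equal(put_image_quadrants(_Q_check, odd_size=False), _IM_check)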
def get_map_lanes(self, size=None):
    if size is not None:
        img = Image.fromarray(self.map_image_lanes.astype(np.uint8))
        img = img.resize((size[1], size[0]), Image.ANTIALIAS)
        img.load()
        return np.fliplr(np.asarray(img, dtype="int32"))
    return np.fliplr(self.map_image_lanes)
def wrapper(*args):
    x = args[0]
    w = args[1]
    if x.ndim == 3:
        w = np.flipud(w)
        w = np.transpose(w, (1, 2, 0))
        if args[3] == 'channels_last':
            x = np.transpose(x, (0, 2, 1))
    elif x.ndim == 4:
        w = np.fliplr(np.flipud(w))
        w = np.transpose(w, (2, 3, 0, 1))
        if args[3] == 'channels_last':
            x = np.transpose(x, (0, 3, 1, 2))
    else:
        w = np.flip(np.fliplr(np.flipud(w)), axis=2)
        w = np.transpose(w, (3, 4, 0, 1, 2))
        if args[3] == 'channels_last':
            x = np.transpose(x, (0, 4, 1, 2, 3))
    y = func(x, w, args[2], args[3])
    if args[3] == 'channels_last':
        if y.ndim == 3:
            y = np.transpose(y, (0, 2, 1))
        elif y.ndim == 4:
            y = np.transpose(y, (0, 2, 3, 1))
        else:
            y = np.transpose(y, (0, 2, 3, 4, 1))
    return y
def predict_result(model, x_test, img_size_target):
    # predict on both the original and the left-right flipped x
    x_test_reflect = np.array([np.fliplr(x) for x in x_test])
    preds_test1 = model.predict(x_test).reshape(-1, img_size_target, img_size_target)
    preds_test2_reflect = model.predict(x_test_reflect).reshape(-1, img_size_target, img_size_target)
    preds_test2 = np.array([np.fliplr(x) for x in preds_test2_reflect])
    preds_avg = (preds_test1 + preds_test2) / 2
    return preds_avg
def load_images(random_state=1234): train_df = pd.read_csv("data/train.csv", index_col="id", usecols=[0]) depths_df = pd.read_csv("data/depths.csv", index_col="id") train_df = train_df.join(depths_df) test_df = depths_df[~depths_df.index.isin(train_df.index)] print(">>> train_df:",train_df.shape) print(train_df.head()) print(">>> test_df:", test_df.shape) print(test_df.head()) train_df["images"] = [gradmag(np.array(imread(path_train_images+"{}.png".format(idx)))) for idx in tqdm(train_df.index)] train_df["masks"] = [np.array(load_img(path_train_masks+"{}.png".format(idx),grayscale=True))/255 for idx in tqdm(train_df.index)] train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2) train_df["coverage_class"] = train_df.coverage.map(cov_to_class) print("*** TRAIN ***") print(train_df.head()) print("*** TEST ***") print(test_df.head()) ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split( train_df.index.values, np.array(train_df.images.tolist()).reshape(-1, img_size_target, img_size_target, 1), np.array(train_df.masks.tolist()).reshape(-1, img_size_target, img_size_target, 1), train_df.coverage.values, train_df.z.values, test_size=0.2, stratify=train_df.coverage_class, random_state=random_state) #Data augmentation x_train2 = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0) y_train2 = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0) print(x_train2.shape) print(y_valid.shape) x_test = np.array([gradmag(np.array(imread(path_test_images+"{}.png".format(idx)))) for idx in tqdm(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1) return x_train2, x_valid, y_train2, y_valid, x_test, test_df.index.values
def extract(self, img, output_shape, corners=None, hints=None): """Extract a frame from `img`. This function always corrects for perspective distortion and may correct for radial distortion.""" if img.dtype != np.uint8: raise ValueError('Can only operate on uint8.') if corners is None: corners = self.locate(img, hints=hints) if self.calibration_profile is not None and \ undistort.should_undistort(img, corners, self.calibration_profile): img, corners = undistort.undistort(img, corners, self.calibration_profile) corners = self.locate(img, hints=hints) # Crop image to corners (speeds up the perspective transform) img, corners = crop_to_corners(img, corners) # Compute perspective transform corners = np.fliplr(corners).astype(np.float32) dst_corners = np.array(output_shape) * ((0, 0), (1, 0), (1, 1), (0, 1)) dst_corners = np.fliplr(dst_corners).astype(np.float32) m = cv2.getPerspectiveTransform(corners, dst_corners) return cv2.warpPerspective(img, m, output_shape, flags=cv2.INTER_NEAREST)
def rforests(trainx, trainy, test, n_estimators=100, k=5):
    trainy = np.ravel(trainy)

    forest = RandomForestClassifier(n_estimators)
    forest.fit(trainx, trainy)

    prob_train = forest.predict_proba(trainx)
    prob_test = forest.predict_proba(test)

    # Since the index is the number of the country that's been chosen
    # we can use these with argsort to get the maximum 5; we will have to do this
    # for the entire matrix though.
    sort_train = np.argsort(prob_train)[:, -k:]
    sort_test = np.argsort(prob_test)[:, -k:]

    # Now we need to transform these back to countries, but to map I need to
    # have a dataframe.
    col_names = []
    for i in range(k):
        name = "country_destination_" + str(i + 1)
        col_names.append(name)

    pred_train = pd.DataFrame(sort_train, columns=col_names)
    pred_test = pd.DataFrame(sort_test, columns=col_names)

    for name in col_names:
        pred_train[name] = pred_train[name].map(dicts.country)
        pred_test[name] = pred_test[name].map(dicts.country)

    pred_train = np.fliplr(pred_train)
    pred_test = np.fliplr(pred_test)

    return forest, pred_train, pred_test
def process_image(self, scanparams, pointparams, edf): delta, omega, alfa, beta, chi, phi, mon, transm = pointparams wavelength, UB = scanparams image = edf.GetData(0) header = edf.GetHeader(0) weights = numpy.ones_like(image) if not self.config.centralpixel: self.config.centralpixel = (int(header['y_beam']), int(header['x_beam'])) if not self.config.sdd: self.config.sdd = float(header['det_sample_dist']) if self.config.background: data = image / mon else: data = image / mon / transm if mon == 0: raise errors.BackendError('Monitor is zero, this results in empty output. Scannumber = {0}, pointnumber = {1}. Did you forget to open the shutter?'.format(self.dbg_scanno, self.dbg_pointno)) util.status('{4}| beta: {0:.3f}, delta: {1:.3f}, omega: {2:.3f}, alfa: {3:.3f}'.format(beta, delta, omega, alfa, time.ctime(time.time()))) # pixels to angles pixelsize = numpy.array(self.config.pixelsize) sdd = self.config.sdd app = numpy.arctan(pixelsize / sdd) * 180 / numpy.pi centralpixel = self.config.centralpixel # (column, row) = (delta, gamma) beta_range= -app[1] * (numpy.arange(data.shape[1]) - centralpixel[1]) + beta delta_range= app[0] * (numpy.arange(data.shape[0]) - centralpixel[0]) + delta # masking if self.config.maskmatrix is not None: if self.config.maskmatrix.shape != data.shape: raise errors.BackendError('The mask matrix does not have the same shape as the images') weights *= self.config.maskmatrix delta_range = delta_range[self.config.ymask] beta_range = beta_range[self.config.xmask] weights = self.apply_mask(weights, self.config.xmask, self.config.ymask) intensity = self.apply_mask(data, self.config.xmask, self.config.ymask) intensity = numpy.rot90(intensity) intensity = numpy.fliplr(intensity) intensity = numpy.flipud(intensity) weights = numpy.rot90(weights) weights = numpy.fliplr(weights) weights = numpy.flipud(weights) #polarisation correction delta_grid, beta_grid = numpy.meshgrid(delta_range, beta_range) Pver = 1 - numpy.sin(delta_grid * numpy.pi / 180.)**2 * numpy.cos(beta_grid * numpy.pi / 180.)**2 #intensity /= Pver return intensity, weights, (wavelength, UB, beta_range, delta_range, omega, alfa, chi, phi)
def array_transpose(self, flip=False):
    """Transpose the arrays in strand coverage"""
    self.transpose_cov1 = []
    self.transpose_cov2 = []
    for a in self.cov_sense_all:
        if flip:
            a1 = np.transpose(a[:, 0])
            a1.shape = (a1.shape[0], 1)
            self.transpose_cov1.append(np.fliplr(a1))
            a2 = np.transpose(a[:, 1])  # was a[:, 0], which duplicated the first strand
            a2.shape = (a2.shape[0], 1)
            self.transpose_cov2.append(np.fliplr(a2))
        else:
            a1 = np.transpose(a[:, 0])
            a1.shape = (a1.shape[0], 1)
            self.transpose_cov1.append(a1)
            a2 = np.transpose(a[:, 1])
            a2.shape = (a2.shape[0], 1)
            self.transpose_cov2.append(a2)
    self.transpose_cov1 = np.array(self.transpose_cov1)
    self.transpose_cov2 = np.array(self.transpose_cov2)
def flip_jet(jet, pool='r'):
    """
    Takes a rotated jet (25, 25) from rotate_jet() and flips it across the
    vertical axis according to whether you want the right side
    (pool = {r, R, right, Right, ...}) or the left side
    (pool = {l, L, Left, left, ...}) to contain the most energy.
    """
    weight = jet.sum(axis=0)

    halfway = jet.shape[0] / 2.
    l, r = np.int(np.floor(halfway)), np.int(np.ceil(halfway))
    l_weight, r_weight = np.sum(weight[:l]), np.sum(weight[r:])

    if ('r' in pool.lower()) and ('l' in pool.lower()):
        raise ValueError('Jet pooling side must have l -OR- r in the name.')

    if 'r' in pool.lower():
        if r_weight > l_weight:
            return jet
        return np.fliplr(jet)
    elif 'l' in pool.lower():
        if l_weight > r_weight:
            return jet
        return np.fliplr(jet)
    else:
        raise ValueError('Jet pooling side must have l -OR- r in the name.')
def reflectEdges(self, width=None): """Extend the edges of the image by reflection. The corners aren't dealt with properly, but this might give some help when applying a hanningFilter after.""" # Extend the size of the image and do some bookkeeping. if width == None: width = min(self.nx, self.ny) / 4.0 self.zeroPad(width) # And then put reflected copy of data into the boundaries. # Reflect/flip left edge. xmin = self.padx xmax = self.padx * 2 ymin = self.pady ymax = self.ny - self.pady self.image[ymin:ymax, 0:xmin] = numpy.fliplr(self.image[ymin:ymax, xmin:xmax]) # Reflect/flip right edge xmin = self.nx - self.padx*2 xmax = self.nx - self.padx self.image[ymin:ymax, (self.nx-self.padx):self.nx] = numpy.fliplr(self.image[ymin:ymax, xmin:xmax]) # Reflect/flip bottom edge xmin = self.padx xmax = self.nx - self.padx ymin = self.padx ymax = self.padx * 2 self.image[0:self.pady, xmin:xmax] = numpy.flipud(self.image[ymin:ymax, xmin:xmax]) # Reflect/flip top edge ymin = self.ny - self.pady*2 ymax = self.ny - self.pady self.image[(self.ny - self.pady):self.ny, xmin:xmax] = numpy.flipud(self.image[ymin:ymax, xmin:xmax]) # I should interpolate over the corners, but .. todo. return
def load_augmented_image(path):
    """Return an image given its path
    .33 proba to be the original image
    .33 proba to be the flipped image
    .33 proba to be shrunk --> (image between 200 & 100 px)
    """
    proba = rand()
    if proba < .33:
        img = load_image(path, resize=True)
        return img
    elif proba < .66:
        img = load_image(path, resize=True)
        return np.fliplr(img)
    else:
        # Load the background and the original image
        img_back = np.ones([224, 224, 3]) * rand(3)
        img_orig = load_image(path, resize=False)

        # Maybe flip the image
        if rand() > .5:
            img_orig = np.fliplr(img_orig)

        # Reshape the original image (to fit a max size of 200 px)
        max_new_shape = max(img_orig.shape)
        downscale_factor = round(rand() * 100 + 100) / max_new_shape
        img_orig = rescale(img_orig, downscale_factor)

        # Put img_orig on the background
        yy, xx, _ = img_orig.shape
        y, x, _ = img_back.shape
        y = int(rand() * (y - yy))
        x = int(rand() * (x - xx))
        img_back[y:yy + y, x:xx + x] = img_orig
        return img_back
def calc_vel_matrix(px,py,vd,c): x_init=-46.30; x_end=-46.85; y_init=-23.37; y_end=-23.92; interval=0.55/(c-1); v_mtr=np.zeros((c,c),dtype=np.float64); q_mtr=np.zeros((c,c),dtype=np.int32); py_cache=[]; px_cache=[]; v_cache=[]; for j in range(0,c): y_inds=((py>=y_end+interval*j)&(py<y_end+interval*(j+1))); py_cache.append(py[y_inds]); px_cache.append(px[y_inds]); v_cache.append(vd[y_inds]); if(j%10==0): print("cache-" + str(c-j)); for i in range(0,c): for j in range(0, c): x_inds=((px_cache[j]>=x_end+interval*i)&(px_cache[j]<x_end+interval*(i+1))); if(len(x_inds)<1): continue; v_sq=v_cache[j][x_inds]; q_mtr[i,j]=len(v_sq); v_mtr[i,j]=np.median(v_sq); if(i%100==0): print(c-i); v_mtr=np.transpose(np.fliplr(v_mtr)); q_mtr=np.transpose(np.fliplr(q_mtr)); return {'v_mtr':v_mtr, 'qtd_mtr':q_mtr};
def updateDisplayRGB(self, auto = False): """ Make an RGB image (N, M, 3) (pyqt will interprate this as RGB automatically) with masked pixels shown in blue at the maximum value of the cspad. This ensures that the masked pixels are shown at full brightness. """ if self.geom_fnam is not None : self.cspad_geom[self.pixel_maps[0], self.pixel_maps[1]] = self.cspad.ravel() self.mask_geom[self.pixel_maps[0], self.pixel_maps[1]] = self.mask.ravel() trans = np.fliplr(self.cspad_geom.T) trans_mask = np.fliplr(self.mask_geom.T) # # I need to make the mask True between the asics... trans_mask[self.background] = True else : trans = np.fliplr(self.cspad.T) trans_mask = np.fliplr(self.mask.T) self.cspad_max = self.cspad.max() # convert to RGB # Set masked pixels to B display_data = np.zeros((trans.shape[0], trans.shape[1], 3), dtype = self.cspad.dtype) display_data[:, :, 0] = trans * trans_mask display_data[:, :, 1] = trans * trans_mask display_data[:, :, 2] = trans + (self.cspad_max - trans) * ~trans_mask self.display_RGB = display_data if auto : self.plot.setImage(self.display_RGB) else : self.plot.setImage(self.display_RGB, autoRange = False, autoLevels = False, autoHistogramRange = False)
def mirror_edges(X, nPixels):
    """Given a tensor X with dimensions (z, c, row, col), produces a new tensor
    with dimensions (z, c, row+2*nPixels, col+2*nPixels) with an "outer border"
    created by mirroring pixels along the outer border of X.
    """
    assert(nPixels > 0)

    z, c, m, n = X.shape
    Xm = np.zeros((z, c, m + 2 * nPixels, n + 2 * nPixels), dtype=X.dtype)

    # the interior of Xm is just X
    Xm[:, :, nPixels:m + nPixels, nPixels:n + nPixels] = X

    # Note we do *not* replicate the pixel on the outer edge of the original image.
    for ii in range(z):
        for jj in range(c):
            # left edge
            Xm[ii, jj, :, 0:nPixels] = np.fliplr(Xm[ii, jj, :, (nPixels + 1):(2 * nPixels + 1)])
            # right edge
            Xm[ii, jj, :, -nPixels:] = np.fliplr(Xm[ii, jj, :, (-2 * nPixels - 1):(-nPixels - 1)])
            # top edge (fills in corners)
            Xm[ii, jj, 0:nPixels, :] = np.flipud(Xm[ii, jj, (nPixels + 1):(2 * nPixels + 1), :])
            # bottom edge (fills in corners)
            Xm[ii, jj, -nPixels:, :] = np.flipud(Xm[ii, jj, (-2 * nPixels - 1):(-nPixels - 1), :])

    return Xm
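# Hedged sanity sketch (illustrative, not from the original source): mirroring a
# (1, 1, 3, 3) tensor by one pixel yields a (1, 1, 5, 5) tensor whose left border
# column reflects the first interior column (column 0 equals column 2).
import numpy as np

_X_check = np.arange(9, dtype=float).reshape(1, 1, 3, 3)
_Xm_check = mirror_edges(_X_check, 1)
assert _Xm_check.shape == (1, 1, 5, 5)
assert np.array_equal(_Xm_check[0, 0, :, 0], _Xm_check[0, 0, :, 2])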
def fliplr(x):
    if x.ndim == 3:
        x = np.transpose(np.fliplr(np.transpose(x, (0, 2, 1))), (0, 2, 1))
    elif x.ndim == 4:
        for i in range(x.shape[0]):
            x[i] = np.transpose(np.fliplr(np.transpose(x[i], (0, 2, 1))), (0, 2, 1))
    return x.astype(float)
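# Hedged usage sketch (illustrative, not from the original source): for a 4-D
# channel-first batch this wrapper reverses the last (width) axis, matching
# np.fliplr applied to each (height, width) channel slice. Note the 4-D branch
# modifies its input in place, hence the copy below.
import numpy as np

_x_check = np.arange(24, dtype=float).reshape(1, 2, 3, 4)
assert np.array_equal(fliplr(_x_check.copy())[0, 0], np.fliplr(_x_check[0, 0]))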
def save(self, config, args):
    """
    save LSDMap object in .lsdmap file and eigenvalues/eigenvectors
    in .eg/.ev files
    """
    if isinstance(self.struct_filename, list):
        struct_filename = self.struct_filename[0]
    else:
        struct_filename = self.struct_filename

    path, ext = os.path.splitext(struct_filename)
    np.savetxt(path + '.eg', np.fliplr(self.eigs[np.newaxis]), fmt='%9.6f')
    np.savetxt(path + '.ev', np.fliplr(self.evs), fmt='%.18e')
    # np.save(path + '_eg.npy', np.fliplr(self.eigs[np.newaxis]))
    # np.save(path + '_ev.npy', np.fliplr(self.evs))

    if args.output_file is None:
        try:
            lsdmap_filename = config.get('LSDMAP', 'lsdmfile')
        except:
            return
    else:
        lsdmap_filename = args.output_file

    with open(lsdmap_filename, "w") as file:
        pickle.dump(self, file)
def fetcher(self): try: for i in xrange(self.batch_size_): sample, fname, label = self.jpeg_pack_.get(self.param_['segment'], self.index, self.param_['color'], self.mean_sub_) if self.crop_: if self.output2_: cx = random.randint(0, (sample.shape[0] - self.crop_dim_[0])/self.ratio) * self.ratio cy = random.randint(0, (sample.shape[1] - self.crop_dim_[1])/self.ratio) * self.ratio else: cx = random.randint(0, (sample.shape[0] - self.crop_dim_[0])) cy = random.randint(0, (sample.shape[1] - self.crop_dim_[1])) sample = sample[cx:cx+self.crop_dim_[0], cy:cy+self.crop_dim_[1], :] if self.mirror_: flag_mirror = random.random() < 0.5 if flag_mirror: sample = numpy.fliplr(sample) self.buffer[i,...] = sample.transpose((2,0,1)) * self.scale_ if self.output_label: self.label_buffer[i,0,0,0] = label if self.output2_: sample2, fname, label = self.jpeg_pack2_.get(self.param_['segment2'], self.index, self.param_['color2'], self.mean_sub2_) if self.crop_: cx2 = cx / self.ratio cy2 = cy / self.ratio sample2 = sample2[cx2:cx2+self.crop_dim2_[0], cy2:cy2+self.crop_dim2_[1]] if self.mirror_ and flag_mirror: sample2 = numpy.fliplr(sample2) self.buffer2[i,...] = sample2.transpose((2,0,1)) * self.scale2_ self.index += 1 except: self.worker_succeed = False raise else: self.worker_succeed = True
def recale(matrix):
    l = len(matrix)
    bigmat = np.zeros([2 * l, 2 * l])
    bigmat[l:2 * l, l:2 * l] = matrix
    bigmat[l:2 * l, 0:l] = np.fliplr(matrix)
    bigmat[0:l] = np.transpose(np.fliplr(np.transpose(bigmat[l:2 * l])))
    return bigmat
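# Hedged usage sketch (illustrative, not from the original source): recale mirrors
# an (l, l) block into a (2l, 2l) array; the top-left quadrant ends up flipped in
# both directions relative to the input.
import numpy as np

_m_check = np.arange(4.).reshape(2, 2)
_big_check = recale(_m_check)
assert _big_check.shape == (4, 4)
assert np.array_equal(_big_check[:2, :2], np.flipud(np.fliplr(_m_check)))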
def gridVisDVF(dvfImFileName,sliceNum = -1,titleString = 'DVF',saveFigPath ='.',deformedImFileName = None, contourNum=40): dvf = sitk.ReadImage(dvfImFileName) dvfIm = sitk.GetArrayFromImage(dvf) # get numpy array z_dim, y_dim, x_dim, channels = dvfIm.shape # get 3D volume shape if not (channels == 3 ): print "dvf image expected to have three scalor channels" if sliceNum == -1: sliceNum = z_dim/2 [gridX,gridY]=np.meshgrid(np.arange(1,x_dim+1),np.arange(1,y_dim+1)) fig = plt.figure() if deformedImFileName : bgGray = sitk.ReadImage(deformedImFileName) bgGrayIm = sitk.GetArrayFromImage(bgGray) # get numpy array plt.imshow(np.fliplr(np.flipud(bgGrayIm[sliceNum,:,:])),cmap=plt.cm.gray) idMap = np.zeros(dvfIm.shape) for i in range(z_dim): for j in range(y_dim): for k in range(x_dim): idMap[i,j,k,0] = i idMap[i,j,k,1] = j idMap[i,j,k,2] = k mapIm = dvfIm + idMap CS = plt.contour(gridX,gridY,np.fliplr(np.flipud(mapIm[sliceNum,:,:,1])), contourNum, hold='on', colors='red') CS = plt.contour(gridX,gridY,np.fliplr(np.flipud(mapIm[sliceNum,:,:,2])), contourNum, hold='on', colors='red') plt.title(titleString) plt.savefig(saveFigPath + '/' + titleString) fig.clf() plt.close(fig) return
def center_normTrace_decomp(K): print 'centering kernel' #### Get transformed features for K_train that DONT snoop when centering, tracing, or eiging##### Kcent=KernelCenterer() Ktrain=Kcent.fit_transform(K[:in_samples,:in_samples]) #Ktrain=Ktrain/float(np.trace(Ktrain)) #[EigVals,EigVectors]=scipy.sparse.linalg.eigsh(Ktrain,k=reduced_dimen,which='LM') [EigVals,EigVectors]=scipy.linalg.eigh(Ktrain,eigvals=(in_samples-reduced_dimen,in_samples-1)) for i in range(len(EigVals)): if EigVals[i]<=0: EigVals[i]=0 EigVals=np.flipud(np.fliplr(np.diag(EigVals))) EigVectors=np.fliplr(EigVectors) Ktrain_decomp=np.dot(EigVectors,scipy.linalg.sqrtm(EigVals)) #### Get transformed features for K_test using K_train implied mapping #### Kcent=KernelCenterer() Kfull=Kcent.fit_transform(K) #Kfull=Kfull/float(np.trace(Kfull)) K_train_test=Kfull[in_samples:,:in_samples] Ktest_decomp=np.dot(K_train_test,np.linalg.pinv(Ktrain_decomp.T)) ####combine mapped train and test vectors and normalize each vector#### Kdecomp=np.vstack((Ktrain_decomp,Ktest_decomp)) print 'doing normalization' Kdecomp=normalize(Kdecomp,copy=False) return Kdecomp
def phase_diagram(self,updown,leftright,xlab,ylab): mdense = np.loadtxt("mdense.txt", delimiter=',') m1d = np.loadtxt("m1d.txt", delimiter=',') m2d = np.loadtxt("m2d.txt", delimiter=',') mdis = np.loadtxt("mdis.txt", delimiter=',') mtotal = np.loadtxt('mtotal.txt',delimiter=',') mdense_p = mdense/mtotal m1d_p = m1d/mtotal m2d_p = m2d/mtotal if updown: mdense_p = np.flipud(mdense_p) m1d_p = np.flipud(m1d_p) m2d_p = np.flipud(m2d_p) if leftright: mdense_p = np.fliplr(mdense_p) m1d_p = np.fliplr(m1d_p) m2d_p = np.fliplr(m2d_p) r = m1d_p g = m2d_p b = mdense_p rgb = np.dstack((r,g,b)) im = Image.fromarray(np.uint8(rgb*255.999)) plt.imshow(im,extent=[0.125,1.125,self.nmet_init/self.num_mol,self.nmet_max/self.num_mol],aspect="auto") plt.xlabel(xlab) plt.ylabel(ylab)
def ps_batch(self):
    x_batch = np.zeros([CONST.lenPATCH, CONST.lenPATCH, CONST.COLOR_IN]).astype('float32')
    y_batch = np.zeros([CONST.lenPATCH, CONST.lenPATCH, CONST.COLOR_IN]).astype('float32')

    rand_index = self.index_list[0]
    self.index_list = self.index_list[1:]

    x_batch = self.dset_train[1][:, :, rand_index]
    y_batch = self.dset_train[2][:, :, rand_index]

    x_batch = np.reshape(x_batch, (CONST.lenPATCH, CONST.lenPATCH, 1))
    y_batch = np.reshape(y_batch, (CONST.lenPATCH, CONST.lenPATCH, 1))

    ## Data Augmentation
    if random.randint(0, 1):
        x_batch = np.fliplr(x_batch)
        y_batch = np.fliplr(y_batch)
    if random.randint(0, 1):
        x_batch = np.flipud(x_batch)
        y_batch = np.flipud(y_batch)
    rand_rot = random.randint(0, 3)
    x_batch = np.rot90(x_batch, rand_rot)
    y_batch = np.rot90(y_batch, rand_rot)

    return np.array([x_batch, y_batch])
def load_batch(self, batch_size=1, is_testing=False): data_type = "train" if not is_testing else "val" path_A = glob('./datasets/%s/%sA/*' % (self.dataset_name, data_type)) path_B = glob('./datasets/%s/%sB/*' % (self.dataset_name, data_type)) self.n_batches = int(min(len(path_A), len(path_B)) / batch_size) total_samples = self.n_batches * batch_size # Sample n_batches * batch_size from each path list so that model sees all # samples from both domains path_A = np.random.choice(path_A, total_samples, replace=False) path_B = np.random.choice(path_B, total_samples, replace=False) for i in range(self.n_batches-1): batch_A = path_A[i*batch_size:(i+1)*batch_size] batch_B = path_B[i*batch_size:(i+1)*batch_size] imgs_A, imgs_B = [], [] for img_A, img_B in zip(batch_A, batch_B): img_A = self.imread(img_A) img_B = self.imread(img_B) img_A = scipy.misc.imresize(img_A, self.img_res) img_B = scipy.misc.imresize(img_B, self.img_res) if not is_testing and np.random.random() > 0.5: img_A = np.fliplr(img_A) img_B = np.fliplr(img_B) imgs_A.append(img_A) imgs_B.append(img_B) imgs_A = np.array(imgs_A)/127.5 - 1. imgs_B = np.array(imgs_B)/127.5 - 1. yield imgs_A, imgs_B
axs[0].set_xlabel("Coverage") axs[1].set_xlabel("Coverage class") # # Plotting the depth distributions¶ # sns.distplot(train_df.z, label="Train") # sns.distplot(test_df.z, label="Test") # plt.legend() # plt.title("Depth distribution") # Create train/validation split stratified by salt coverage ids_train, ids_valid, x_train, x_valid, y_train, y_valid, cov_train, cov_test, depth_train, depth_test = train_test_split( train_df.index.values, np.array(train_df.images.map(upsample).tolist()).reshape( -1, img_size_target, img_size_target, 1), np.array(train_df.masks.map(upsample).tolist()).reshape( -1, img_size_target, img_size_target, 1), train_df.coverage.values, train_df.z.values, test_size=0.2, stratify=train_df.coverage_class, random_state=1234) # data augmentation x_train = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0) y_train = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0) print(x_train.shape) print(y_valid.shape) train() predict()
def mirror(image_path, direction='all'): """Returns an array of image(s) that are the mirrored form of the original image Mirror takes the path to an image and generates a mirrored version of that image in the horizontal direction, vertical direction, or both. It returns an array of pixel values for the mirrored image(s). Inputs: ------- image_path: string The file path of the image to be mirrored. direction: string Direction of mirroring. Options: 'horizontal', 'vertical', 'all' Default: 'all' Returns: -------- mirrored_images: np.array """ try: # check for valid input parameters if not isinstance(image_path, str): raise TypeError("The file path must be a string.") if not isinstance(direction, str): raise TypeError( "The direction must be a string: 'horizontal', 'vertical', or 'all'" ) if not direction.lower() in ["horizontal", "vertical", "all"]: raise ValueError( "The direction must be 'horizontal', 'vertical' or 'all'") img = imread(image_path) except TypeError as e: print( "Invalid parameter types. Correct parameter types: image_path: str, num_images: int, direction: string" ) raise e except ValueError as e: print( "Invalid direction value. The direction must be a string: 'horizontal', 'vertical', or 'all'" ) raise e except FileNotFoundError as e: raise e # import image as array mirrored_images = [img] # flip horizontally if direction.lower() == 'horizontal' or direction.lower() == 'all': horiz_image = np.fliplr(img) mirrored_images.append(horiz_image) # flip vertically if direction.lower() == 'vertical' or direction.lower() == 'all': vert_image = img[::-1] mirrored_images.append(vert_image) # convert mirrored_images back to an array mirrored_images = np.asarray(mirrored_images) return mirrored_images
rawCapture = PiRGBArray(camera, size=(640, 480)) # allow the camera to warmup time.sleep(0.1) fgbg = cv2.createBackgroundSubtractorKNN(history_length, dist2threshold, detectShadows) for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True): # grab the raw NumPy array representing the image, then initialize the timestamp # and occupied/unoccupied text image = frame.array fgmask = fgbg.apply(image) fgmask = np.fliplr(fgmask) fgmask_crop = fgmask[0:480, 80:560] small = cv2.resize(fgmask_crop, (0, 0), fx=0.133, fy=0.133) cv2.imshow('KNN', fgmask_crop) # show the frame #cv2.imshow("Frame2", image) key = cv2.waitKey(1) & 0xFF # clear the stream in preparation for the next frame rawCapture.truncate(0) # if the `q` key was pressed, break from the loop if key == ord("q"):
def __next__(self): self.count += 1 if self.count == self.nB: raise StopIteration ia = self.count * self.batch_size ib = min((self.count + 1) * self.batch_size, self.nF) if self.multi_scale: # Multi-Scale YOLO Training height = random.choice(range(10, 20)) * 32 # 320 - 608 pixels else: # Fixed-Scale YOLO Training height = self.height img_all = [] labels_all = [] for index, files_index in enumerate(range(ia, ib)): img_path = self.img_files[self.shuffled_vector[files_index]] label_path = self.label_files[self.shuffled_vector[files_index]] img = cv2.imread(os.path.join("data_train",img_path)) # BGR if img is None: continue augment_hsv = True if self.augment and augment_hsv: # SV augmentation by 50% #fraction = 0.50 fraction = random.uniform(0,1) img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) S = img_hsv[:, :, 1].astype(np.float32) V = img_hsv[:, :, 2].astype(np.float32) a = (random.random() * 2 - 1) * fraction + 1 S *= a if a > 1: np.clip(S, a_min=0, a_max=255, out=S) a = (random.random() * 2 - 1) * fraction + 1 V *= a if a > 1: np.clip(V, a_min=0, a_max=255, out=V) img_hsv[:, :, 1] = S.astype(np.uint8) img_hsv[:, :, 2] = V.astype(np.uint8) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) h, w, _ = img.shape img, ratio, padw, padh = letterbox(img, height=height) # Load labels if os.path.isfile(os.path.join("labels",label_path)): with warnings.catch_warnings(): warnings.simplefilter("ignore") labels0 = np.loadtxt(os.path.join("labels",label_path), dtype=np.float32).reshape(-1, 5) #print(os.path.join("data_train",label_path)) #data = np.loadtxt(myfile, unpack=True) #labels0 = np.loadtxt(os.path.join("data_train",label_path), dtype=np.float32).reshape(-1, 5) #print(labels0) #print() # Normalized xywh to pixel xyxy format labels = labels0.copy() #labels[:, 0] = np.int32(labels[:, 0]) labels[:, 1] = ratio * w * (labels0[:, 1] - labels0[:, 3] / 2) + padw labels[:, 2] = ratio * h * (labels0[:, 2] - labels0[:, 4] / 2) + padh labels[:, 3] = ratio * w * (labels0[:, 1] + labels0[:, 3] / 2) + padw labels[:, 4] = ratio * h * (labels0[:, 2] + labels0[:, 4] / 2) + padh #print(labels) else: print(os.path.join("data_train",label_path)) print("st wrong") labels = np.array([]) # Augment image and labels if self.augment: type = random.randint(1,4) if type == 1: img, labels, M = random_affine(img, labels, degrees=(-20, 20), translate=(0., 0.), scale=(1., 1.)) if type == 2 : img, labels, M = random_affine(img,labels,degrees=(0, 0), translate=(0., 0.),scale=(0.3, 2.)) if type == 3 : img, labels, M = random_affine(img,labels,degrees=(0, 0), translate=(-0.3, 0.3),scale=(1., 1.)) if type == 4 : img, labels, M = random_affine(img, labels, degrees=(-5, 5), translate=(0.10, 0.10), scale=(0.90, 1.1)) plotFlag = False if plotFlag: import matplotlib.pyplot as plt plt.figure(figsize=(10, 10)) if index == 0 else None plt.subplot(4, 4, index + 1).imshow(img[:, :, ::-1]) plt.plot(labels[:, [1, 3, 3, 1, 1]].T, labels[:, [2, 2, 4, 4, 2]].T, '.-') plt.axis('off') nL = len(labels) if nL > 0: # convert xyxy to xywh labels[:, 1:5] = xyxy2xywh(labels[:, 1:5].copy()) / height # labels[:, 1:5] = xyxy2xywh(labels[:, 1:5].copy()) / height # print(os.path.join("data_train",label_path)) # print(labels) if self.augment: # random left-right flip lr_flip = True if lr_flip & (random.random() > 0.5): img = np.fliplr(img) if nL > 0: labels[:, 1] = 1 - labels[:, 1] # random up-down flip ud_flip = False if ud_flip & (random.random() > 0.5): img = np.flipud(img) if nL > 0: labels[:, 2] = 1 - labels[:, 2] img_all.append(img) 
labels_all.append(torch.from_numpy(labels)) # Normalize img_all = np.stack(img_all)[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB and cv2 to pytorch img_all = np.ascontiguousarray(img_all, dtype=np.float32) img_all /= 255.0 return torch.from_numpy(img_all), labels_all
def run(self): effect_config = self._device.device_config["effects"][ "effect_wavelength"] led_count = self._device.device_config["LED_Count"] led_mid = self._device.device_config["LED_Mid"] audio_data = self.get_audio_data() y = self.get_mel(audio_data) if y is None: return y = np.copy(self._math_service.interpolate(y, led_count // 2)) self._dsp.common_mode.update(y) diff = y - self.prev_spectrum self.prev_spectrum = np.copy(y) # Color channel mappings. r = self._dsp.r_filt.update(y - self._dsp.common_mode.value) g = np.abs(diff) b = self._dsp.b_filt.update(np.copy(y)) r = np.array([j for i in zip(r, r) for j in i]) output = np.array([ self._color_service.full_gradients[effect_config["color_mode"]][0][ (led_count if effect_config["reverse_grad"] else 0): (None if effect_config["reverse_grad"] else led_count):] * r, self._color_service.full_gradients[effect_config["color_mode"]][1][ (led_count if effect_config["reverse_grad"] else 0): (None if effect_config["reverse_grad"] else led_count):] * r, self._color_service.full_gradients[effect_config["color_mode"]][2][ (led_count if effect_config["reverse_grad"] else 0): (None if effect_config["reverse_grad"] else led_count):] * r ]) # Calculate how many steps the array will roll. steps = self.get_roll_steps(effect_config["roll_speed"]) self._color_service.full_gradients[ effect_config["color_mode"]] = np.roll( self._color_service.full_gradients[ effect_config["color_mode"]], steps * (-1 if effect_config["reverse_roll"] else 1), axis=1) output[0] = gaussian_filter1d(output[0], sigma=effect_config["blur"]) output[1] = gaussian_filter1d(output[1], sigma=effect_config["blur"]) output[2] = gaussian_filter1d(output[2], sigma=effect_config["blur"]) if effect_config["flip_lr"]: output = np.fliplr(output) if effect_config["mirror"]: # Calculate the real mid. real_mid = led_count / 2 # Add some tolerance for the real mid. if (real_mid >= led_mid - 2) and (real_mid <= led_mid + 2): # Use the option with shrinking the array. output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1) else: # Mirror the whole array. After this the array has a two times bigger size than led_count. big_mirrored_array = np.concatenate( (output[:, ::-1], output[:, ::1]), axis=1) start_of_array = led_count - led_mid end_of_array = start_of_array + led_count output = big_mirrored_array[:, start_of_array:end_of_array] self.queue_output_array_noneblocking(output)
def __getitem__(self, idx): # https://github.com/keras-team/keras/issues/3675#issuecomment-347697970 if idx % 35 == 0: # approx once per minute during first epoch on standard_p100 t0 = time() gc.collect() t1 = time() print("Garbage collected batch %s in %.2fs" % (idx, t1 - t0)) i = idx * self.batch_size length = min(self.batch_size, (len(self.names) - i)) batch_x = np.empty((length, img_rows, img_cols, channel), dtype=np.float32) batch_y = np.empty((length, img_rows, img_cols, 2), dtype=np.float32) bad_images = [] # 1. Maybe pre-fetch the batch # # Construct the paths to download paths = [] for i_batch in range(length): name = self.names[i] fcount = int(name.split('.')[0].split('_')[0]) bcount = int(name.split('.')[0].split('_')[1]) im_name = fg_files[fcount] bg_name = bg_files[bcount] paths.append((fg_base_path + im_name, a_base_path + im_name, bg_base_path + bg_name)) fg_cache_dir = os.path.join(cache_dir, 'fg') a_cache_dir = os.path.join(cache_dir, 'a') bg_cache_dir = os.path.join(cache_dir, 'bg') # Check whether they're cached is_cached = False for fg_path, _, _ in paths: if mio.is_cached(fg_path, fg_cache_dir): is_cached = True break if not is_cached: paths_by_dir = defaultdict(list) paths_by_dir[fg_cache_dir].extend([p[0] for p in paths]) paths_by_dir[a_cache_dir].extend([p[1] for p in paths]) paths_by_dir[bg_cache_dir].extend([p[2] for p in paths]) # Cache the batch! retry = 0 while True: try: mio.batch_cache(paths_by_dir) break except Exception as e: retry = retry + 1 if retry >= 5: raise e sleep(1) # 2. Now process for i_batch in range(length): fg_path, a_path, bg_path = paths[i_batch] fg = mio.imread(fg_path, cache_dir=fg_cache_dir) a = mio.imread(a_path, flags=0, cache_dir=a_cache_dir) bg = mio.imread(bg_path, cache_dir=bg_cache_dir) if fg is None or a is None or bg is None: if fg is None: bad = fg_path elif a is None: bad = a_path else: bad = bg_path print("Bad image: %s" % bad) bad_images.append(i_batch) print("Skipping bad image") i += 1 continue image, alpha, fg, bg = process(fg, a, bg) trimap = generate_trimap(alpha) if not skip_crop: # crop size 320:640:480 = 1:1:1 different_sizes = [(320, 320), (480, 480), (640, 640)] crop_size = random.choice(different_sizes) x, y = random_choice(trimap, crop_size) image = safe_crop(image, x, y, crop_size) alpha = safe_crop(alpha, x, y, crop_size) else: h, w = image.shape[:2] x = 0 if img_cols == w else (w - img_cols) // 2 y = 0 if img_rows == h else (h - img_rows) // 2 image = crop(image, x, y, (img_rows, img_cols)) alpha = crop(alpha, x, y, (img_rows, img_cols)) if channel == 4: trimap = generate_trimap(alpha) # Flip array left to right randomly (prob=1:1) if np.random.random_sample() > 0.5: image = np.fliplr(image) alpha = np.fliplr(alpha) if channel == 4: trimap = np.fliplr(trimap) batch_x[i_batch, :, :, 0:3] = image / 255. if channel == 4: batch_x[i_batch, :, :, 3] = trimap / 255. if channel == 4: mask = np.equal(trimap, 128).astype(np.float32) else: mask = np.ones((img_rows, img_cols)) batch_y[i_batch, :, :, 0] = alpha / 255. batch_y[i_batch, :, :, 1] = mask i += 1 if bad_images: if len(bad_images) == length: print("WARNING: Empty batch!") else: batch_x = np.delete(batch_x, bad_images, 0) batch_y = np.delete(batch_y, bad_images, 0) return batch_x, batch_y
def __getitem__(self, index): try: # get paths and loader and dploader idx = index % self.count_datasets paths_grp = self.datasets[idx][index // self.count_datasets] loader = self.datasets[idx].img_loader dploader = self.datasets[idx].disp_loader # loader image and disp tn = len(paths_grp) assert tn >= self.n filename = os.path.basename(paths_grp[0]) if (Test): logger.info('load {} ...'.format(filename)) left = np.array(loader(paths_grp[0])) right = np.array(loader(paths_grp[1])) disp_left = None disp_right = None # Random Horizontal Flip if (self.training and 0 == tn % 2 and rand() > 0.5): left_t = np.fliplr(right).copy() right = np.fliplr(left).copy() left = left_t if (tn == 4): disp_left = np.fliplr(dploader(paths_grp[3])).copy() if (self.n == 4): disp_right = np.fliplr(dploader(paths_grp[2])).copy() else: if (self.n >= 3): disp_left = np.ascontiguousarray(dploader(paths_grp[2])) if (self.n >= 4): disp_right = np.ascontiguousarray(dploader(paths_grp[3])) # Random crop if self.crop_size is not None: crop_size = [self.crop_size[1], self.crop_size[0]] fun_crop = RandomCrop(left.shape[:2], crop_size, self.training) left = fun_crop(left) right = fun_crop(right) disp_left = fun_crop(disp_left) disp_right = fun_crop(disp_right) # preprocess process = self._process() left = process(left) right = process(right) disp_left = self._refill_invalid_disp(disp_left) disp_right = self._refill_invalid_disp(disp_right) # return if (Test): logger.info('{} loaded'.format(filename)) if (self.n == 2): return filename, left, right elif (self.n == 3): return filename, left, right, disp_left[None] elif (self.n == 4): return filename, left, right, disp_left[None], disp_right[None] except Exception as err: logger.error(traceback.format_exc()) msg = '[ Loadering data ] An exception happened: %s \n\t left: %s' % ( str(err), paths_grp[0]) logger.error(msg) index = randint(0, len(self) - 1) return self.__getitem__(index)
def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp mosaic = self.mosaic and random.random() < hyp['mosaic'] if mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None # MixUp https://arxiv.org/pdf/1710.09412.pdf if random.random() < hyp['mixup']: img2, labels2 = load_mosaic( self, random.randint(0, self.n - 1)) r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 img = (img * r + img2 * (1 - r)).astype(np.uint8) labels = np.concatenate((labels, labels2), 0) else: # Load image img, (h0, w0), (h, w) = load_image(self, index) # Letterbox # final letterboxed shape shape = self.batch_shapes[self.batch[index] ] if self.rect else self.img_size img, ratio, pad = letterbox( img, shape, auto=False, scaleup=self.augment) # for COCO mAP rescaling shapes = (h0, w0), ((h / h0, w / w0), pad) labels = self.labels[index].copy() if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy( labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: # Augment imagespace if not mosaic: img, labels = random_perspective(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear'], perspective=hyp['perspective']) # Augment colorspace augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Apply cutouts # if random.random() < 0.9: # labels = cutout(img, labels) nL = len(labels) # number of labels if nL: labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 if self.augment: # flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) if nL: labels[:, 2] = 1 - labels[:, 2] # flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) if nL: labels[:, 1] = 1 - labels[:, 1] labels_out = torch.zeros((nL, 6)) if nL: labels_out[:, 1:] = torch.from_numpy(labels) # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes
def display_live(image, boxes, masks, class_ids, class_names, scores=None, title="", figsize=(16, 16), ax=None, show_mask=True, show_bbox=True, colors=None, captions=None): """ boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. masks: [height, width, num_instances] class_ids: [num_instances] class_names: list of class names of the dataset scores: (optional) confidence scores for each box title: (optional) Figure title show_mask, show_bbox: To show masks and bounding boxes or not figsize: (optional) the size of the image colors: (optional) An array or colors to use with each object captions: (optional) A list of strings to use as captions for each object """ # Number of instances N = boxes.shape[0] if not N: print("\n*** No instances to display *** \n") else: assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] # If no axis is passed, create one and automatically call show() auto_show = False if not ax: _, ax = plt.subplots(1, figsize=figsize) auto_show = True # Generate random colors colors = colors or visualize.random_colors(N) # Show area outside image boundaries. height, width = image.shape[:2] ax.set_ylim(height + 10, -10) ax.set_xlim(-10, width + 10) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): color = colors[i] # Bounding box if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in image cropping. continue y1, x1, y2, x2 = boxes[i] if show_bbox: p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7, linestyle="dashed", edgecolor=color, facecolor='none') ax.add_patch(p) # Label if not captions: class_id = class_ids[i] score = scores[i] if scores is not None else None label = class_names[class_id] caption = "{} {:.3f}".format(label, score) if score else label else: caption = captions[i] ax.text(x1, y1 + 8, caption, color='w', size=11, backgroundcolor="none") # Mask mask = masks[:, :, i] if show_mask: masked_image = visualize.apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = visualize.find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) return masked_image ax.imshow(masked_image.astype(np.uint8)) if auto_show: plt.show()
def random_flip_leftright(batch):
    for i in range(len(batch)):
        if bool(random.getrandbits(1)):
            batch[i] = np.fliplr(batch[i])
    return batch
def draw_instances(config, image, depth, boxes, masks, class_ids, parameters, scores=None, title="", figsize=(16, 16), ax=None, draw_mask=False, transform_planes=False, statistics=[], detection_flags={}): """ boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. masks: [height, width, num_instances] class_ids: [num_instances] class_names: list of class names of the dataset scores: (optional) confidence scores for each box figsize: (optional) the size of the image. """ ## Number of instances N = len(boxes) if not N: pass else: assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] ## Generate random colors instance_colors = ColorPalette(N).getColorMap(returnTuples=True) if len(detection_flags) and False: for index in range(N): if detection_flags[index] < 0.5: instance_colors[index] = (128, 128, 128) pass continue pass class_colors = ColorPalette(11).getColorMap(returnTuples=True) class_colors[0] = (128, 128, 128) ## Show area outside image boundaries. height, width = image.shape[:2] masked_image = image.astype(np.uint8).copy() normal_image = np.zeros(image.shape) depth_image = depth.copy() for i in range(N): ## Bounding box if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in image cropping. continue y1, x1, y2, x2 = boxes[i] ## Label class_id = class_ids[i] score = scores[i] if scores is not None else None x = random.randint(x1, (x1 + x2) // 2) ## Mask mask = masks[:, :, i] masked_image = apply_mask(masked_image.astype(np.float32), mask, instance_colors[i]).astype(np.uint8) ## Mask Polygon ## Pad to ensure proper polygons for masks that touch image edges. if draw_mask: padded_mask = np.zeros( (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: ## Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 cv2.polylines(masked_image, np.expand_dims(verts.astype(np.int32), 0), True, color=class_colors[class_id]) continue continue normal_image = drawNormalImage(normal_image) depth_image = drawDepthImage(depth_image) return masked_image.astype(np.uint8), normal_image.astype(np.uint8), depth_image
def annotation_jitter( I, a_in, min_box_width=20, jitter_scale_min=0.9, jitter_scale_max=1.1, jitter_offset=16, target_width=640, target_height=480 ): assert I.shape[ 2 ] == 3, 'Not implemented for images with more than 3 channels' a = copy.deepcopy(a_in) # MA: sanity check new_rects = [] for i in range(len(a.rects)): r = a.rects[i] try: assert (r.x1 < r.x2 and r.y1 < r.y2) new_rects.append(r) except: print('bad rectangle') a.rects = new_rects if a.rects: cur_min_box_width = min([r.width() for r in a.rects]) else: cur_min_box_width = min_box_width / jitter_scale_min # don't downscale below min_box_width jitter_scale_min = max( jitter_scale_min, float(min_box_width) / cur_min_box_width ) # it's always ok to upscale jitter_scale_min = min(jitter_scale_min, 1.0) jitter_scale_max = jitter_scale_max jitter_scale = np.random.uniform(jitter_scale_min, jitter_scale_max) jitter_flip = np.random.random_integers(0, 1) if jitter_flip == 1: I = np.fliplr(I) for r in a: r.x1 = I.shape[1] - r.x1 r.x2 = I.shape[1] - r.x2 r.x1, r.x2 = r.x2, r.x1 for p in r.point: p.x = I.shape[1] - p.x I1 = cv2.resize( I, None, fx=jitter_scale, fy=jitter_scale, interpolation=cv2.INTER_CUBIC ) jitter_offset_x = np.random.random_integers(-jitter_offset, jitter_offset) jitter_offset_y = np.random.random_integers(-jitter_offset, jitter_offset) rescaled_width = I1.shape[1] rescaled_height = I1.shape[0] px = round(0.5 * (target_width)) - round(0.5 * (rescaled_width)) + jitter_offset_x py = round(0.5 * (target_height)) - round(0.5 * (rescaled_height)) + jitter_offset_y I2 = np.zeros((target_height, target_width, 3), dtype=I1.dtype) x1 = max(0, px) y1 = max(0, py) x2 = min(rescaled_width, target_width - x1) y2 = min(rescaled_height, target_height - y1) I2[0:(y2 - y1), 0:(x2 - x1), :] = I1[y1:y2, x1:x2, :] ox1 = round(0.5 * rescaled_width) + jitter_offset_x oy1 = round(0.5 * rescaled_height) + jitter_offset_y ox2 = round(0.5 * target_width) oy2 = round(0.5 * target_height) for r in a: r.x1 = round(jitter_scale * r.x1 - x1) r.x2 = round(jitter_scale * r.x2 - x1) r.y1 = round(jitter_scale * r.y1 - y1) r.y2 = round(jitter_scale * r.y2 - y1) if r.x1 < 0: r.x1 = 0 if r.y1 < 0: r.y1 = 0 if r.x2 >= I2.shape[1]: r.x2 = I2.shape[1] - 1 if r.y2 >= I2.shape[0]: r.y2 = I2.shape[0] - 1 for p in r.point: p.x = round(jitter_scale * p.x - x1) p.y = round(jitter_scale * p.y - y1) # MA: make sure all points are inside the image r.point = [ p for p in r.point if p.x >= 0 and p.y >= 0 and p.x < I2.shape[1] and p.y < I2.shape[0] ] new_rects = [] for r in a.rects: if r.x1 <= r.x2 and r.y1 <= r.y2: new_rects.append(r) else: pass a.rects = new_rects return I2, a
def _perform(self): # Header keyword to update key = 'IMGRECT' keycom = 'Image rectified?' # get amp mode ampmode = self.action.args.ccddata.header['AMPMODE'].strip().upper() if '__B' in ampmode or '__G' in ampmode: newimg = np.rot90(self.action.args.ccddata.data, 2) self.action.args.ccddata.data = newimg if self.action.args.ccddata.uncertainty: newunc = np.rot90(self.action.args.ccddata.uncertainty.array, 2) self.action.args.ccddata.uncertainty.array = newunc mask = getattr(self.action.args.ccddata, "mask", None) if mask is not None: newmask = np.rot90(mask, 2) self.action.args.ccddata.mask = newmask else: self.logger.info("No mask data to rectify") flags = getattr(self.action.args.ccddata, "flags", None) if flags is not None: newflags = np.rot90(flags, 2) self.action.args.ccddata.flags = newflags else: self.logger.info("No flags data to rectify") elif '__D' in ampmode or '__F' in ampmode: newimg = np.fliplr(self.action.args.ccddata.data) self.action.args.ccddata.data = newimg if self.action.args.ccddata.uncertainty: newunc = np.fliplr(self.action.args.ccddata.uncertainty.array) self.action.args.ccddata.uncertainty.array = newunc mask = getattr(self.action.args.ccddata, "mask", None) if mask is not None: newmask = np.fliplr(mask) self.action.args.ccddata.mask = newmask else: self.logger.info("No mask data to rectify") flags = getattr(self.action.args.ccddata, "flags", None) if flags is not None: newflags = np.fliplr(flags) self.action.args.ccddata.flags = newflags else: self.logger.info("No flags data to rectify") elif '__A' in ampmode or '__H' in ampmode or 'TUP' in ampmode: newimg = np.flipud(self.action.args.ccddata.data) self.action.args.ccddata.data = newimg if self.action.args.ccddata.uncertainty: newunc = np.flipud(self.action.args.ccddata.uncertainty.array) self.action.args.ccddata.uncertainty.array = newunc mask = getattr(self.action.args.ccddata, "mask", None) if mask is not None: newmask = np.flipud(mask) self.action.args.ccddata.mask = newmask else: self.logger.info("No mask data to rectify") flags = getattr(self.action.args.ccddata, "flags", None) if flags is not None: newflags = np.flipud(flags) self.action.args.ccddata.flags = newflags else: self.logger.info("No flags data to rectify") self.action.args.ccddata.header[key] = (True, keycom) log_string = RectifyImage.__module__ self.action.args.ccddata.header['HISTORY'] = log_string self.logger.info(log_string) # write out int image kcwi_fits_writer(self.action.args.ccddata, table=self.action.args.table, output_file=self.action.args.name, output_dir=self.config.instrument.output_directory, suffix="int") self.context.proctab.update_proctab(frame=self.action.args.ccddata, suffix="int", filename=self.action.args.name) self.context.proctab.write_proctab() return self.action.args
def computeBoundaryFaces(elemsF, facesN): """Compute boundary faces of a tetrahedral mesh. :param ndarray elemsF: elements-face connectivity. :param ndarray facesN: faces-nodes connectivity. :return: nodal-connectivity and indexes of boundary-faces, number of boundary faces. :rtype: ndarray """ # Sort indexes and add 1 position in order to use indexes as Matlab A0 = np.sort(elemsF[:, 0]) + 1 I0 = np.argsort(elemsF[:, 0]) + 1 A1 = np.sort(elemsF[:, 1]) + 1 I1 = np.argsort(elemsF[:, 1]) + 1 A2 = np.sort(elemsF[:, 2]) + 1 I2 = np.argsort(elemsF[:, 2]) + 1 A3 = np.sort(elemsF[:, 3]) + 1 I3 = np.argsort(elemsF[:, 3]) + 1 # Number of faces nFaces = elemsF.max() # As consequence, dimensions of E must be increased # 2 rows and 1 column E = np.zeros((nFaces + 2, 9)) E[A0, 1] = I0 E[A1, 2] = I1 E[A2, 3] = I2 E[A3, 4] = I3 # If the same face is listed in the same row of 'elemsF' # more than, once it will simply be missed! Because of this we # have to insert the following dummy variables in order to # determine the boundary faces. tmp = np.diff(A0) == 0 ind0 = np.where(tmp)[False] tmp = np.diff(A1) == 0 ind1 = np.where(tmp)[False] tmp = np.diff(A2) == 0 ind2 = np.where(tmp)[False] tmp = np.diff(A3) == 0 ind3 = np.where(tmp)[False] E[A0[ind0], 5] = 1 E[A1[ind1], 6] = 1 E[A2[ind2], 7] = 1 E[A3[ind3], 8] = 1 # Delete extra rows and column E = np.delete(E, (0), axis=0) E = np.delete(E, (0), axis=1) # Final sorting E.sort() E = np.fliplr(E) # Get boundary nodes by first examining which columns in E # have only one nonzero element, meaning that this face is # related to only one single tetra, which means it is on the # boundary of the domain. Since faces are defined by their nodes, # we have the boundary nodes too. # Get boundary faces to nodes ind = (E[:, 1] == 0) bfacesN = np.array(np.transpose(facesN[ind, :]), dtype=np.int) # Get indexes of boundary faces ind = np.where(ind == True) bFaces = np.array(np.transpose(ind), dtype=np.int) size = bFaces.shape nBoundaryFaces = size[0] bFaces = bFaces.reshape((nBoundaryFaces)) return bfacesN, bFaces, nBoundaryFaces
def dem2array( filename, variable_name='elevation', easting_min=None, easting_max=None, northing_min=None, northing_max=None, use_cache=False, verbose=False, ): """Read Digitial Elevation model from the following NetCDF format (.dem) Example: ncols 3121 nrows 1800 xllcorner 722000 yllcorner 5893000 cellsize 25 NODATA_value -9999 138.3698 137.4194 136.5062 135.5558 .......... name_in should be .dem file to be read. """ import os from anuga.file.netcdf import NetCDFFile msg = 'Filename must be a text string' assert isinstance(filename, basestring), msg msg = 'Extension should be .dem' assert os.path.splitext(filename)[1] in ['.dem'], msg msg = 'Variable name must be a text string' assert isinstance(variable_name, basestring), msg # Get NetCDF infile = NetCDFFile(filename, netcdf_mode_r) if verbose: log.critical('Reading DEM from %s' % (filename)) ncols = int(infile.ncols) nrows = int(infile.nrows) xllcorner = float(infile.xllcorner) # Easting of lower left corner yllcorner = float(infile.yllcorner) # Northing of lower left corner cellsize = float(infile.cellsize) NODATA_value = float(infile.NODATA_value) zone = int(infile.zone) false_easting = float(infile.false_easting) false_northing = float(infile.false_northing) # Text strings projection = infile.projection datum = infile.datum units = infile.units Z = infile.variables[variable_name][:] Z = Z.reshape(nrows, ncols) Z = num.where(Z == NODATA_value, num.nan, Z) #changed the orientation of Z array to make it consistent with grd2array result Z = num.fliplr(Z.T) #print ncols, nrows, xllcorner,yllcorner, cellsize, NODATA_value, zone x = num.linspace(xllcorner, xllcorner + (ncols - 1) * cellsize, ncols) y = num.linspace(yllcorner, yllcorner + (nrows - 1) * cellsize, nrows) return x, y, Z
def draw_boxes(image, boxes=None, refined_boxes=None, masks=None, captions=None, visibilities=None, title="", ax=None): """Draw bounding boxes and segmentation masks with different customizations. boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates. refined_boxes: Like boxes, but draw with solid lines to show that they're the result of refining 'boxes'. masks: [N, height, width] captions: List of N titles to display on each box visibilities: (optional) List of values of 0, 1, or 2. Determine how prominent each bounding box should be. title: An optional title to show over the image ax: (optional) Matplotlib axis to draw on. """ # Number of boxes assert boxes is not None or refined_boxes is not None N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0] # Matplotlib Axis if not ax: _, ax = plt.subplots(1, figsize=(12, 12)) # Generate random colors colors = random_colors(N) # Show area outside image boundaries. margin = image.shape[0] // 10 ax.set_ylim(image.shape[0] + margin, -margin) ax.set_xlim(-margin, image.shape[1] + margin) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): # Box visibility visibility = visibilities[i] if visibilities is not None else 1 if visibility == 0: color = "gray" style = "dotted" alpha = 0.5 elif visibility == 1: color = colors[i] style = "dotted" alpha = 1 elif visibility == 2: color = colors[i] style = "solid" alpha = 1 # Boxes if boxes is not None: if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in cropping. continue y1, x1, y2, x2 = boxes[i] p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=alpha, linestyle=style, edgecolor=color, facecolor='none') ax.add_patch(p) # Refined boxes if refined_boxes is not None and visibility > 0: ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32) p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=color, facecolor='none') ax.add_patch(p) # Connect the top-left corners of the anchor and proposal if boxes is not None: ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) # Captions if captions is not None: caption = captions[i] # If there are refined boxes, display captions on them if refined_boxes is not None: y1, x1, y2, x2 = ry1, rx1, ry2, rx2 ax.text(x1, y1, caption, size=11, verticalalignment='top', color='w', backgroundcolor="none", bbox={'facecolor': color, 'alpha': 0.5, 'pad': 2, 'edgecolor': 'none'}) # Masks if masks is not None: mask = masks[:, :, i] masked_image = apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. padded_mask = np.zeros( (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) ax.imshow(masked_image.astype(np.uint8))
def display_instances_5fps(image, boxes, masks, class_ids, class_names, scores=None, title="", figsize=(16, 16), ax=None, show_mask=True, show_bbox=True, colors=None, captions=None, making_video=False, making_image=False, detect=False, hc=False, real_time=False): """ boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. masks: [height, width, num_instances] class_ids: [num_instances] class_names: list of class names of the dataset scores: (optional) confidence scores for each box title: (optional) Figure title show_mask, show_bbox: To show masks and bounding boxes or not figsize: (optional) the size of the image colors: (optional) An array or colors to use with each object captions: (optional) A list of strings to use as captions for each object """ # Number of instances N = boxes.shape[0] if not N: print("\n*** No instances to display *** \n") else: assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] # If no axis is passed, create one and automatically call show() auto_show = True if not ax: fig, ax = plt.subplots(1, figsize=figsize) canvas = FigureCanvas(fig) # Generate random colors if not making_video or not real_time: colors = colors or random_colors(N) # Show area outside image boundaries. height, width = image.shape[:2] ax.set_ylim(height + 10, -10) ax.set_xlim(-10, width + 10) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): class_id = class_ids[i] if making_video or real_time: # you can also assign a specific color for each class. etc: # if class_id == 1: # color = colors[0] # elif class_id == 2: # color = colors[1] color = colors[class_id - 1] elif hc: #just for hard-code the mask for paper if class_id == 14: color = colors[0] else: color = colors[class_id] else: color = colors[i] # Bounding box if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in image cropping. continue y1, x1, y2, x2 = boxes[i] if show_bbox: p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7, linestyle="dashed", edgecolor=color, facecolor='none') ax.add_patch(p) # Label if not captions: score = scores[i] if scores is not None else None label = class_names[class_id] x = random.randint(x1, (x1 + x2) // 2) caption = "{} {:.3f}".format(label, score) if score else label else: caption = captions[i] ax.text(x1, y1 + 8, caption, color='w', size=14, backgroundcolor="none") # Mask mask = masks[:, :, i] if show_mask: masked_image = apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) ax.imshow(masked_image.astype(np.uint8)) if detect: plt.close() return canvas # To transform the drawn figure into ndarray X fig.canvas.draw() X = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') X = X.reshape(fig.canvas.get_width_height()[::-1] + (3, )) # open cv's RGB style: BGR if not real_time: X = X[..., ::-1] if making_video or real_time: plt.close() return X elif making_image: cv2.imwrite('splash.png', X) if auto_show: plt.show()
def __getitem__(self, index): index = self.indices[index] # linear, shuffled, or image_weights hyp = self.hyp mosaic = self.mosaic and random.random() < hyp['mosaic'] if mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None # MixUp augmentation if random.random() < hyp['mixup']: img, labels = mixup( img, labels, *load_mosaic(self, random.randint(0, self.n - 1))) else: # Load image img, (h0, w0), (h, w) = load_image(self, index) # Letterbox shape = self.batch_shapes[self.batch[ index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ( (h / h0, w / w0), pad) # for COCO mAP rescaling labels = self.labels[index].copy() if labels.size: # normalized xywh to pixel xyxy format labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) if self.augment: img, labels = random_perspective( img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear'], perspective=hyp['perspective']) nl = len(labels) # number of labels if nl: labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) if self.augment: # Albumentations img, labels = self.albumentations(img, labels) nl = len(labels) # update after albumentations # HSV color-space augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Flip up-down if random.random() < hyp['flipud']: img = np.flipud(img) if nl: labels[:, 2] = 1 - labels[:, 2] # Flip left-right if random.random() < hyp['fliplr']: img = np.fliplr(img) if nl: labels[:, 1] = 1 - labels[:, 1] # Cutouts # labels = cutout(img, labels, p=0.5) labels_out = torch.zeros((nl, 6)) if nl: labels_out[:, 1:] = torch.from_numpy(labels) # Convert img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes
def display_instances(image, boxes, masks, class_ids, class_names, scores=None, title="", figsize=(16, 16), ax=None): """ boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. masks: [height, width, num_instances] class_ids: [num_instances] class_names: list of class names of the dataset scores: (optional) confidence scores for each box figsize: (optional) the size of the image. """ # Number of instances N = boxes.shape[0] if not N: print("\n*** No instances to display *** \n") else: assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] if not ax: _, ax = plt.subplots(1, figsize=figsize) # Generate random colors colors = random_colors(N) # Show area outside image boundaries. height, width = image.shape[:2] ax.set_ylim(height + 10, -10) ax.set_xlim(-10, width + 10) ax.axis('off') ax.set_title(title) masked_image = image.astype(np.uint32).copy() for i in range(N): color = colors[i] # Bounding box if not np.any(boxes[i]): # Skip this instance. Has no bbox. Likely lost in image cropping. continue y1, x1, y2, x2 = boxes[i] p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7, linestyle="dashed", edgecolor=color, facecolor='none') ax.add_patch(p) # Label class_id = class_ids[i] score = scores[i] if scores is not None else None label = class_names[class_id] x = random.randint(x1, (x1 + x2) // 2) caption = "{} {:.3f}".format(label, score) if score else label ax.text(x1, y1 + 8, caption, color='w', size=11, backgroundcolor="none") # Mask mask = masks[:, :, i] masked_image = apply_mask(masked_image, mask, color) # Mask Polygon # Pad to ensure proper polygons for masks that touch image edges. padded_mask = np.zeros( (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) padded_mask[1:-1, 1:-1] = mask contours = find_contours(padded_mask, 0.5) for verts in contours: # Subtract the padding and flip (y, x) to (x, y) verts = np.fliplr(verts) - 1 p = Polygon(verts, facecolor="none", edgecolor=color) ax.add_patch(p) ax.imshow(masked_image.astype(np.uint8)) #toimage(masked_image).show()#worked toimage(masked_image).save(fp='maskonly\\'+time.strftime("%Y%m%d-%H%M%S")+'.jpg')
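All three Mask R-CNN display helpers above (draw_boxes, display_instances_5fps, display_instances) trace mask outlines the same way. The sketch below isolates that step with a tiny synthetic mask to show why the vertices are flipped and shifted: skimage's find_contours returns (row, col) coordinates, while matplotlib polygons expect (x, y):

import numpy as np
from skimage.measure import find_contours

# Tiny binary mask touching the image edge; padding by one pixel (as in the
# helpers above) keeps the traced outline closed at the border.
mask = np.zeros((5, 5), dtype=np.uint8)
mask[1:4, 2:5] = 1

padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask

for verts in find_contours(padded_mask, 0.5):
    # find_contours yields (row, col) == (y, x) pairs; fliplr swaps them to
    # (x, y) for matplotlib's Polygon, and subtracting 1 removes the pad offset.
    xy = np.fliplr(verts) - 1
    print(xy[:3])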
def add_trend(x, trend="c", prepend=False, has_constant='skip'): """ Adds a trend and/or constant to an array. Parameters ---------- X : array-like Original array of data. trend : str {"c","t","ct","ctt"} "c" add constant only "t" add trend only "ct" add constant and linear trend "ctt" add constant and linear and quadratic trend. prepend : bool If True, prepends the new data to the columns of X. has_constant : str {'raise', 'add', 'skip'} Controls what happens when trend is 'c' and a constant already exists in X. 'raise' will raise an error. 'add' will duplicate a constant. 'skip' will return the data without change. 'skip' is the default. Returns ------- y : array, recarray or DataFrame The original data with the additional trend columns. If x is a recarray or pandas Series or DataFrame, then the trend column names are 'const', 'trend' and 'trend_squared'. Notes ----- Returns columns as ["ctt","ct","c"] whenever applicable. There is currently no checking for an existing trend. See also -------- statsmodels.tools.tools.add_constant """ # TODO: could be generalized for trend of aribitrary order trend = trend.lower() columns = ['const', 'trend', 'trend_squared'] if trend == "c": # handles structured arrays columns = columns[:1] trendorder = 0 elif trend == "ct" or trend == "t": columns = columns[:2] if trend == "t": columns = columns[1:2] trendorder = 1 elif trend == "ctt": trendorder = 2 else: raise ValueError("trend %s not understood" % trend) is_recarray = _is_recarray(x) is_pandas = _is_using_pandas(x, None) or is_recarray if is_pandas or is_recarray: if is_recarray: descr = x.dtype.descr x = pd.DataFrame.from_records(x) elif isinstance(x, pd.Series): x = pd.DataFrame(x) else: x = x.copy() else: x = np.asanyarray(x) nobs = len(x) trendarr = np.vander(np.arange(1, nobs + 1, dtype=np.float64), trendorder + 1) # put in order ctt trendarr = np.fliplr(trendarr) if trend == "t": trendarr = trendarr[:, 1] if "c" in trend: if is_pandas or is_recarray: # Mixed type protection def safe_is_const(s): try: return np.ptp(s) == 0.0 and np.any(s != 0.0) except: return False col_const = x.apply(safe_is_const, 0) else: col_const = np.logical_and(np.any(np.ptp(np.asanyarray(x), axis=0) == 0, axis=0), np.all(x != 0.0, axis=0)) if np.any(col_const): if has_constant == 'raise': raise ValueError("x already contains a constant") elif has_constant == 'skip': columns = columns[1:] trendarr = trendarr[:, 1:] order = 1 if prepend else -1 if is_recarray or is_pandas: trendarr = pd.DataFrame(trendarr, index=x.index, columns=columns) x = [trendarr, x] x = pd.concat(x[::order], 1) else: x = [trendarr, x] x = np.column_stack(x[::order]) if is_recarray: x = x.to_records(index=False) new_descr = x.dtype.descr extra_col = len(new_descr) - len(descr) if prepend: descr = new_descr[:extra_col] + descr else: descr = descr + new_descr[-extra_col:] if not PY3: # See 3658 names = [entry[0] for entry in descr] dtypes = [entry[1] for entry in descr] names = [bytes(name) for name in names] # Fail loudly if there is a non-ascii name descr = list(zip(names, dtypes)) x = x.astype(np.dtype(descr)) return x
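The column ordering in add_trend comes from the np.vander / np.fliplr pair; a minimal standalone sketch of just that step (five observations, quadratic trend) shows the column layout it produces, with the constant column first:

import numpy as np

# np.vander puts the highest power first, so the raw trend columns come out
# as [t**2, t, 1]; fliplr reverses them into (const, trend, trend_squared).
nobs, trendorder = 5, 2
trendarr = np.vander(np.arange(1, nobs + 1, dtype=np.float64), trendorder + 1)
trendarr = np.fliplr(trendarr)
print(trendarr)
# [[ 1.  1.  1.]
#  [ 1.  2.  4.]
#  [ 1.  3.  9.]
#  [ 1.  4. 16.]
#  [ 1.  5. 25.]]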
def generateImages(self, canvas_img_files, paste_img_files, paste_label_dir, save_img_dir, save_label_dir): """ Intialize a generic detectnet data generator class. It finds the filenames for canvas and paste images, and labels, and splits them into train and validation spilts. :param canvas_img_files: canvas image files :param paste_img_files: paste image files. :param save_img_dir: save image directory. :param save_label_dir: save label directory. """ out_labels = [] # Go through each canvas image and generate a set of images from it depending on its size. self.paste_image_idx = 0 canvas_idx = 1 for canvas_img_file in canvas_img_files: # canvas_img_file = '/media/dcofer/Ubuntu_Data/drone_images/landscapes/vlcsnap-2018-12-21-1.png' # canvas_img_orig = misc.imread(canvas_img_file) canvas_img_orig = cv2.imread(canvas_img_file) # showAndWait('canvas_img_orig', canvas_img_orig) logging.info("Processing file {}. Shape {}".format( canvas_img_file, canvas_img_orig.shape)) width_multiple = float( canvas_img_orig.shape[1]) / self.final_img_width height_multiple = float( canvas_img_orig.shape[0]) / self.final_img_height # If one of the dimensions are less than our final values then just use this image once as is. if width_multiple < 1 or height_multiple < 1: ratio = float(self.final_img_width) / self.final_img_height # Use the dimension that is smallest and scale the image up so it is greater than final image height if width_multiple < height_multiple: new_height = int(canvas_img_orig.shape[0] * ratio) if new_height < self.final_img_height: new_height = self.final_img_height canvas_img = resize(canvas_img_orig, [new_height, self.final_img_width]) else: new_width = int(canvas_img_orig.shape[1] * ratio) if new_width < self.final_img_width: new_width = self.final_img_width canvas_img = resize(canvas_img_orig, [self.final_img_height, new_width]) else: canvas_img = canvas_img_orig canvas_img = img_as_ubyte(canvas_img.copy()) # showAndWait('canvas_img', canvas_img) # cv2.imwrite('/media/dcofer/Ubuntu_Data/drone_images/canvas_img.png', canvas_img) # Now recompute the multiple after potential resizing width_multiple = float(canvas_img.shape[1]) / self.final_img_width height_multiple = float( canvas_img.shape[0]) / self.final_img_height tile_idx = self.splitCanvasIntoTiles( canvas_img_file, canvas_img, paste_img_files, paste_label_dir, save_img_dir, save_label_dir, width_multiple, height_multiple, canvas_idx, out_labels) # Now resize the entire image into the final size and add paste images. whole_canvas_img = img_as_ubyte( resize(canvas_img_orig, [self.final_img_height, self.final_img_width])) # showAndWait('whole_canvas_img', whole_canvas_img) flipped_canvas_img = np.fliplr(whole_canvas_img) # showAndWait('flipped_canvas_img', flipped_canvas_img) rotated_canvas_img = self.rotateCanvasImage(flipped_canvas_img) # This fills in any black spots from rotation with pixels from the original flipped image. where = np.array(np.where(rotated_canvas_img)) flipped_canvas_img[where[0], where[1]] = rotated_canvas_img[where[0], where[1]] rotated_canvas_img = flipped_canvas_img self.addPastedImages(canvas_img_file, rotated_canvas_img, paste_img_files, paste_label_dir, save_img_dir, save_label_dir, canvas_idx, tile_idx + 1, out_labels) canvas_idx += 1 logging.info("Canvas Idx: {}".format(canvas_idx)) logging.info("writing json label files") json_txt = json.dumps(out_labels) out_file = save_img_dir + "/sim_output_labels.json" with open(out_file, 'w') as f: f.write(json_txt)
same as `slope`. bestfit : array-like The reconstructed best-fit line. The shape is the same as `y`. """ if x.ndim != 1 or x.size != y.shape[axis]: raise ValueError( f'Invalid x-shape {x.shape} for regression along axis {axis} ' f'of y-shape {y.shape}.') with _ArrayContext(y, push_right=axis) as context: # Get regression coefficients. Flattened data is shape (K, N) # where N is regression dimension. Permute to (N, K) then back again. # N gets replaced with length-2 dimension (slope, offset). y = context.data y_params, y_var = np.polyfit(x, y.T, deg=1, cov=True) y_params = np.fliplr(y_params.T) # flip to (offset, slope) # Get best-fit line and slope y_fit = y_params[:, :1] + x * y_params[:, 1:] y_slope = y_params[:, 1:] # Get standard error # See Dave's paper (TODO: add citation) n = y.shape[1] resid = y - y_fit # residual mean = resid.mean(axis=1) var = resid.var(axis=1) rho = np.sum( (resid[:, 1:] - mean[:, None]) * (resid[:, :-1] - mean[:, None]), axis=1, ) / ((n - 1) * var) # correlation factor
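The coefficient reordering in the regression snippet above can be checked in isolation. This sketch fits two exact lines (made-up data) and shows how np.fliplr turns polyfit's highest-degree-first rows into per-series (offset, slope) pairs; the covariance output requested in the original is omitted for brevity:

import numpy as np

x = np.arange(5.0)
y = np.stack([2 + 3 * x, 1 - 0.5 * x])   # two series, shape (2, 5)

# polyfit on a (nobs, n_series) target returns shape (deg+1, n_series),
# highest degree first: row 0 holds the slopes, row 1 the offsets.
params = np.polyfit(x, y.T, deg=1)
params = np.fliplr(params.T)             # shape (n_series, 2): (offset, slope)
print(np.round(params, 6))
# [[ 2.   3. ]
#  [ 1.  -0.5]]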
def __getitem__(self, index): if self.image_weights: index = self.indices[index] hyp = self.hyp if self.mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None else: # Load image img, (h0, w0), (h, w) = load_image(self, index) # Letterbox shape = self.batch_shapes[self.batch[ index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ( (h / h0, w / w0), pad) # for COCO mAP rescaling # Load labels labels = [] x = self.labels[index] if x.size > 0: # Normalized xywh to pixel xyxy format labels = x.copy() labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] if self.augment: # Augment imagespace if not self.mosaic: img, labels = random_affine(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear']) # Augment colorspace augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Apply cutouts # if random.random() < 0.9: # labels = cutout(img, labels) nL = len(labels) # number of labels if nL: # convert xyxy to xywh labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # Normalize coordinates 0 - 1 labels[:, [2, 4]] /= img.shape[0] # height labels[:, [1, 3]] /= img.shape[1] # width if self.augment: # random left-right flip lr_flip = True if lr_flip and random.random() < 0.5: img = np.fliplr(img) if nL: labels[:, 1] = 1 - labels[:, 1] # random up-down flip ud_flip = False if ud_flip and random.random() < 0.5: img = np.flipud(img) if nL: labels[:, 2] = 1 - labels[:, 2] labels_out = torch.zeros((nL, 6)) if nL: labels_out[:, 1:] = torch.from_numpy(labels) # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) return torch.from_numpy(img), labels_out, self.img_files[index], shapes
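Both dataset __getitem__ implementations above do the same bookkeeping when mirroring: np.fliplr reverses the image width axis, and only the normalized x-centre of each label needs reflecting; width, height, and y are unchanged. A tiny sketch with hypothetical labels:

import numpy as np

# Toy normalized labels in (class, x_center, y_center, w, h) format and a
# dummy 4x4x3 image (made-up data, just to show the flip arithmetic).
img = np.arange(4 * 4 * 3, dtype=np.uint8).reshape(4, 4, 3)
labels = np.array([[0, 0.25, 0.50, 0.20, 0.40]])

img = np.fliplr(img)               # mirror the image left-right
labels[:, 1] = 1 - labels[:, 1]    # reflect the x-centre: 0.25 -> 0.75
print(labels)                      # y, w, h stay 0.5, 0.2, 0.4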
def _random_flipud(self, random_flipud=True):
    # Randomly flip the image top-to-bottom with 50% probability.
    if random_flipud and np.random.choice([True, False]):
        self.img = np.flipud(self.img)
measurements.append(float(line[3]) + correction)
images.append(image_right)
measurements.append(float(line[3]) - correction)
images.append(image_centre)
measurement = float(line[3])
measurements.append(measurement)

# Data augmentation: add a horizontally mirrored copy of every frame
aug_images, aug_measurements = [], []
for image, measurement in zip(images, measurements):
    aug_images.append(image)
    aug_measurements.append(measurement)
    aug_images.append(np.fliplr(image))          # mirror the image left-right
    aug_measurements.append(measurement * -1.0)  # negate the steering measurement for the mirrored frame

X_train = np.array(aug_images)
y_train = np.array(aug_measurements)
X_train.shape

plt.imshow(X_train[0])
plt.show()

from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Activation, Dropout
from keras.layers.convolutional import Convolution2D, Conv2D
from keras.layers import Lambda, Cropping2D
def addPastedImages(self, canvas_img_file, canvas_img, paste_img_files, paste_label_dir, save_img_dir, save_label_dir, canvas_idx, tile_idx, out_labels): """ Adds paste images to the canvas file and saves it and the labels. :param canvas_img_file: canvas image filename to split :param canvas_img: canvas image to split :param paste_img_files: paste image files. :param width_multiple: multiple of width to final image size. :param height_multiple: multiple of height to final image size. :param canvas_idx: canvas index. :param tile_idx: tile index. """ canvas_width = canvas_img.shape[1] canvas_height = canvas_img.shape[0] num_pastes = np.random.randint(self.min_pasted_per_canvas, self.max_pasted_per_canvas) labels = [] if canvas_height != self.final_img_height or canvas_width != self.final_img_width: logging.error( "The canvas height for a paste add does not match the final image dimensions. Skipping image." ) return canvas_label = np.zeros([canvas_height, canvas_width, 3], dtype=np.uint8) logging.info("num_pastes: {}".format(num_pastes)) for past_idx in range(num_pastes): paste_img_file_idx = self.paste_image_idx # paste_img_file = '/media/dcofer/Ubuntu_Data/drone_images/drones/111.png' paste_img_file = paste_img_files[paste_img_file_idx] paste_filename = os.path.basename(paste_img_file) paste_basename = os.path.splitext(paste_filename)[0] logging.info(" Pasting in {}".format(paste_img_file)) logging.info(" Paste Image Idx {}".format(paste_img_file_idx)) paste_img = self.loadPasteImage(paste_img_file) # utils.showAndWait('paste_img', paste_img) paste_label_file = paste_label_dir + '/' + paste_basename + '_label.png' paste_label_img = cv2.imread(paste_label_file, cv2.IMREAD_UNCHANGED) paste_label_img = cv2.cvtColor(paste_label_img, cv2.COLOR_BGR2GRAY) # utils.showAndWait('paste_label_img', paste_label_img) paste_width = paste_img.shape[1] paste_height = paste_img.shape[0] logging.info(" paste_width: {}".format(paste_width)) logging.info(" paste_height: {}".format(paste_height)) if paste_label_img.shape[ 0] != paste_height or paste_label_img.shape[ 1] != paste_width: raise RuntimeError( "Paste label dims do not match paste image.") new_paste_width, new_paste_height = self.calcNewPasteDims( paste_width, paste_height) logging.info(" new_paste_width: {}".format(new_paste_width)) logging.info(" new_paste_height: {}".format(new_paste_height)) if paste_width != new_paste_width or paste_height != new_paste_height: sized_paste_img = cv2.resize(paste_img, dsize=(new_paste_width, new_paste_height), interpolation=cv2.INTER_AREA) sized_paste_mask = cv2.resize(paste_label_img, dsize=(new_paste_width, new_paste_height), interpolation=cv2.INTER_AREA) else: sized_paste_img = paste_img sized_paste_mask = paste_label_img # utils.showAndWait('sized_paste_img', sized_paste_img) # utils.showAndWait('sized_paste_mask', sized_paste_mask) flip_val = np.random.randint(0, 100) if flip_val < 50: logging.info( " flip_val: {}. Flipping image.".format(flip_val)) flipped_paste_img = np.fliplr(sized_paste_img) flipped_paste_mask = np.fliplr(sized_paste_mask) else: logging.info( " flip_val: {}. 
Leaving image unflipped".format( flip_val)) flipped_paste_img = sized_paste_img flipped_paste_mask = sized_paste_mask rotate_deg = int( np.random.uniform(-self.max_paste_rotation, self.max_paste_rotation)) logging.info(" rotate_deg: {}.".format(rotate_deg)) rotated_paste_img, rotated_paste_mask = utils.rotateImg( flipped_paste_img, rotate_deg, mask_in=flipped_paste_mask) # utils.showAndWait('rotated_paste_img', rotated_paste_img) # utils.showAndWait('rotated_paste_mask', rotated_paste_mask) paste_width = rotated_paste_img.shape[1] paste_height = rotated_paste_img.shape[0] paste_x, paste_y = self.getNonoverlappingPastePos( paste_width, paste_height, canvas_width, canvas_height, labels) if paste_x < 0 or paste_y < 0: break # paste_x = 1081 # paste_y = 266 logging.info(" paste_x: {}".format(paste_x)) logging.info(" paste_y: {}".format(paste_y)) # Get canvas ROI canvas_roi = canvas_img[paste_y:(paste_y + paste_height), paste_x:(paste_x + paste_width)] #canvas_roi = np.zeros([paste_height, paste_width, 3], dtype=np.uint8) # showAndWait('canvas_roi', canvas_roi) # Regnerate a new mask because the one that was put through all the processing is not # intact anymore. This was causing weird artifacting. ret, mask = cv2.threshold(rotated_paste_mask, 5, 255, cv2.THRESH_BINARY) # mask = new_mask[:, :, 0] mask_inv = cv2.bitwise_not(mask) # utils.showAndWait('mask', mask) # Black out the are of the mask. background_roi = cv2.bitwise_and(canvas_roi, canvas_roi, mask=mask_inv) # showAndWait('background_roi', background_roi) # cv2.imwrite('/media/dcofer/Ubuntu_Data/drone_images/canvas_ros.png', background_roi) # Now randomly change brightness and contract of foreground drone bright_rand = np.random.randint(0, 100) if bright_rand < self.blur_thresh: logging.info( " bright_rand: {}. Adjusting brightness/contrast.". format(bright_rand)) bright_val = np.random.randint(-self.bright_max, self.bright_max) contrast_val = np.random.normal(1.0, self.contrast_max) if contrast_val < 0.5: contrast_val = 0.7 if contrast_val > 1.5: contrast_val = 1.3 logging.info(" bright_val: {}".format(bright_val)) logging.info(" contrast_val: {}".format(contrast_val)) bright_foreground_img = cv2.convertScaleAbs(rotated_paste_img, alpha=contrast_val, beta=bright_val) # utils.showAndWait('rotated_paste_img', rotated_paste_img) # utils.showAndWait('bright_foreground_img', bright_foreground_img) else: logging.info( " bright_rand: {}. Leaving image brightness/contrast alone" .format(bright_rand)) bright_foreground_img = rotated_paste_img # Now take only region of paste image that is not black foreground_roi = cv2.bitwise_and(bright_foreground_img, bright_foreground_img, mask=mask) # showAndWait('foreground_roi', foreground_roi) # cv2.imwrite('/media/dcofer/Ubuntu_Data/drone_images/foreground_roi.png', foreground_roi) # Put them together merged_roi = cv2.add(background_roi, foreground_roi) # showAndWait('merged_roi', merged_roi) # cv2.imwrite('/home/mfp/drone-net/test/merged_roi.png', merged_roi) # Now randomly add blur blur_rand = np.random.randint(0, 100) if blur_rand < self.blur_thresh: logging.info( " blur_rand: {}. bluring image.".format(blur_rand)) blur_val = np.random.randint(1, self.blur_max) logging.info(" blur_val: {}".format(blur_val)) if blur_val > 0: # blured_roi = cv2.GaussianBlur(merged_roi, (blur_val, blur_val), 0) blured_roi = cv2.blur(merged_roi, (blur_val, blur_val)) else: blured_roi = merged_roi # cv2.imwrite('/home/mfp/drone-net/test/blured_roi.png', blured_roi) else: logging.info( " blur_rand: {}. 
Leaving image un-blurred".format( blur_rand)) blured_roi = merged_roi # Now put them back into the canvas canvas_img[paste_y:(paste_y + paste_height), paste_x:(paste_x + paste_width)] = blured_roi # utils.showAndWait('canvas_img', canvas_img) # Now put the label into the canvas ret, label_mask = cv2.threshold(rotated_paste_mask, 5, 255, cv2.THRESH_BINARY) where_label = np.array(np.where(label_mask)) canvas_label_roi = canvas_label[paste_y:(paste_y + label_mask.shape[0]), paste_x:(paste_x + label_mask.shape[1]), 2] canvas_label_roi[where_label[0], where_label[1]] = 255 canvas_label[paste_y:(paste_y + label_mask.shape[0]), paste_x:(paste_x + label_mask.shape[1]), 2] = canvas_label_roi # utils.showAndWait('canvas_label', canvas_label) self.canvas_paste_links.append([ canvas_idx, tile_idx, canvas_img_file, paste_img_file, paste_x, paste_y, paste_width, paste_height ]) json_label = { "class": "rect", "height": paste_height, "width": paste_width, "x": paste_x, "y": paste_y } labels.append(json_label) self.incrementNextPastedImageIndex(paste_img_files) # canvas_img = utils.drawLabels(canvas_img, labels) # utils.showAndWait('canvas_img', canvas_img) save_img_file = save_img_dir + '/{}_{}_{}.jpg'.format( self.file_prefix, canvas_idx, tile_idx) cv2.imwrite(save_img_file, canvas_img) logging.info("saving image: {}".format(save_img_file)) #misc.imsave(save_file, canvas_img) save_label_file = save_label_dir + '/{}_{}_{}.txt'.format( self.file_prefix, canvas_idx, tile_idx) utils.saveYoloLabelFile(0, labels, save_label_file, canvas_width, canvas_height) logging.info("saving lable: {}".format(save_label_file)) # # save_label_file = save_label_dir + '/()_{}_{}_label.png'.format(self.file_prefix, canvas_idx, tile_idx) # cv2.imwrite(save_label_file, canvas_label) # logging.info("saving lable image: {}".format(save_label_file)) json_label = { "class": "image", "filename": save_img_file, "annotations": labels } out_labels.append(json_label)
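The paste-compositing inside addPastedImages follows a standard OpenCV mask pattern; the sketch below reproduces it with tiny synthetic arrays (no real canvas or paste images) so the roles of the mask and its inverse are easy to see:

import numpy as np
import cv2

# Synthetic 4x4 background ROI, paste image, and label (made-up values).
canvas_roi = np.full((4, 4, 3), 100, dtype=np.uint8)
paste = np.full((4, 4, 3), 200, dtype=np.uint8)
label = np.zeros((4, 4), dtype=np.uint8)
label[1:3, 1:3] = 255                # pasted object occupies the centre

# Threshold the label into a binary mask, black out that region of the
# background, keep only the masked foreground, then add the two together.
_, mask = cv2.threshold(label, 5, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
background = cv2.bitwise_and(canvas_roi, canvas_roi, mask=mask_inv)
foreground = cv2.bitwise_and(paste, paste, mask=mask)
merged = cv2.add(background, foreground)
print(merged[0, 0], merged[1, 1])    # [100 100 100] [200 200 200]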
def flip_vertical_horizontal(image): return np.flipud(np.fliplr(image))
def flip(image, random_flip):
    if random_flip and np.random.choice([True, False]):
        image = np.fliplr(image)
    return image
def flip_horizontal(image): return np.fliplr(image)
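A quick sanity check of the flip helpers above on a 2x2 array (assuming the functions are in scope together): fliplr mirrors columns, flipud mirrors rows, and applying both is a 180-degree rotation:

import numpy as np

a = np.array([[1, 2],
              [3, 4]])
print(flip_horizontal(a))           # [[2 1] [4 3]]
print(np.flipud(a))                 # [[3 4] [1 2]]
print(flip_vertical_horizontal(a))  # [[4 3] [2 1]]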
import numpy as np

N = 4
M = 5
V = np.random.randint(low=-10, high=10, size=(N, M))
print("Matrix:\r\n{}\n".format(V))

a = np.fliplr(V).diagonal(1)
a_prod = a.prod()
print("Elements above the anti-diagonal: \n" + str(a) + "\nTheir product = " + str(a_prod))

b = np.fliplr(V).diagonal(-1)
b_prod = b.prod()
print("Elements below the anti-diagonal: \n" + str(b) + "\nTheir product = " + str(b_prod))