import numpy as np
import torch
import mdtraj


def custom2pdb(coords, proteinnet_id, route):
    """ Takes a custom representation and turns it into a .pdb file.
        Inputs:
        * coords: array/tensor of shape (3 x N) or (N x 3), in Angstroms.
                  The same atom order as in ProteinNet (i.e. the raw pdb file) is assumed.
        * proteinnet_id: str. ProteinNet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
                         see: https://github.com/aqlaboratory/proteinnet/
        * route: str. destination path for the generated .pdb file.
        Output: tuple of paths: (original, generated) for the structures.
    """
    # convert to numpy
    if isinstance(coords, torch.Tensor):
        coords = coords.detach().cpu().numpy()
    # ensure shape (1, N, 3), as mdtraj expects (frames, atoms, xyz)
    if coords.shape[0] == 3:
        coords = coords.T
    coords = np.expand_dims(coords, axis=0)
    # get pdb id and chain num
    pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
    pdb_destin = "/".join(route.split("/")[:-1]) + "/" + pdb_name + ".pdb"
    # download pdb file and select the appropriate chain
    # (download_pdb and clean_pdb are helpers assumed to be defined elsewhere in this module)
    download_pdb(pdb_name, pdb_destin)
    clean_pdb(pdb_destin, chain_num=chain_num)
    # load trajectory scaffold and replace coordinates - assumes same atom order
    scaffold = mdtraj.load_pdb(pdb_destin)
    scaffold.xyz = coords / 10.0  # mdtraj stores coordinates in nanometers; inputs are Angstroms
    scaffold.save(route)
    return pdb_destin, route
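# A minimal usage sketch (all values below are placeholders, not from the original
# code): the coordinate array must have one row/column per atom of the downloaded
# chain, in Angstroms, and running this requires network access to fetch the PDB.
#
#   coords = model_output                     # tensor of shape (N, 3) or (3, N)
#   original, generated = custom2pdb(coords, "30#1A2B_1_A", "out/prediction.pdb")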
import numpy as np
from scipy import linalg


def PCA(Y, components):
    """ Run PCA, retrieving the first (components) principal components.
        Returns [s0, w0]
        s0: factors
        w0: weights
    """
    N, D = Y.shape
    if N > D:
        sv = linalg.svd(Y, full_matrices=0)
        [s0, w0] = [sv[0][:, 0:components],
                    np.dot(np.diag(sv[1]), sv[2]).T[:, 0:components]]
        v = s0.std(axis=0)
        s0 /= v
        w0 *= v
        return [s0, w0]
    else:
        K = np.cov(Y)
        sv = linalg.eigh(K)
        std_var = np.sqrt(sv[0])
        # scale each eigenvector (column) by the std of its component;
        # np.newaxis is an indexing alias for None, not a callable
        pc = sv[1] * std_var[np.newaxis, :]
        return [pc, std_var]
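# A minimal sketch of calling PCA() on synthetic data (N > D, so the SVD branch is taken):
if __name__ == "__main__":
    Y = np.random.randn(200, 10)
    s0, w0 = PCA(Y, components=2)
    print(s0.shape, w0.shape)  # (200, 2) factors, (10, 2) weights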
def on_train_batch_begin(self, batch, logs=None):
    if batch % self.freq or self.finished:
        return
    while batch >= self._batch:
        x, y = self.yield_batch()
    if self.max_images == -1:
        self.max_images = x.shape[0]
    if x.ndim == 3:
        # np.newaxis is not callable; use it inside an index to add the missing batch dimension
        x = x[np.newaxis, ...]
    if x.shape[0] > self.max_images:
        x = x[:self.max_images, ...]
        y = y[:self.max_images, ...]
    x = x.numpy()
    y = np.argmax(y.numpy(), axis=1)
    if self.encoder:
        y = self.encoder.decode(y)
    for i in range(x.shape[0]):
        # self.add_log(x[i, ...], counter=i, name=f'{self.name}-{y[i]}-batch_{str(self._batch).zfill(3)}')
        self.add_log(x[i, ...], counter=self._count + i, name=f'{self.name}-{y[i]}')
    print(f'Batch {self._batch}: Logged {x.shape[0]} {self.name} images to neptune')
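# Sketch of how a callback like this might be attached to training; the class name
# NeptuneImageLogger and its constructor arguments are assumptions for illustration
# only, not part of the original code:
#
#   logger = NeptuneImageLogger(freq=100, max_images=8, name='train', encoder=label_encoder)
#   model.fit(train_ds, epochs=5, callbacks=[logger])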
[[0 0 0 0 0 0]
 [2 0 0 0 0 0]
 [0 3 0 0 0 0]
 [0 0 4 0 0 0]
 [0 0 0 5 0 0]
 [0 0 0 0 6 6]]
'''

d3[:, ::-1]        # reversed-column view; has no effect on d3 unless assigned back
d3[d3 > 3] = 100
d3[5, 5] = 0
print(d3)
'''
[[  0   0   0   0   0   0]
 [  2   0   0   0   0   0]
 [  0   3   0   0   0   0]
 [  0   0 100   0   0   0]
 [  0   0   0 100   0   0]
 [  0   0   0   0 100   0]]
'''

np.stack()
np.hstack()
np.dstack()
np.concatenate()
np.tile()
np.newaxis          # alias for None used in indexing, not a callable
np.repeat()
np.where()
np.argmax()
np.dot()
np.transpose()
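# A short sketch exercising a few of the functions listed above (values are
# illustrative only, not from the notes):
import numpy as np

a = np.arange(6).reshape(2, 3)
stacked = np.stack([a, a])          # shape (2, 2, 3): new leading axis
hstacked = np.hstack([a, a])        # shape (2, 6)
expanded = a[np.newaxis, :, :]      # shape (1, 2, 3): np.newaxis inserts an axis
clipped = np.where(a > 2, a, 0)     # elementwise select: keep values > 2, else 0
row_max = np.argmax(a, axis=1)      # index of the max in each row -> [2, 2]
prod = np.dot(a, np.transpose(a))   # (2, 3) @ (3, 2) -> (2, 2)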
import cv2
import numpy as np
import matplotlib.pyplot as plt

# source image for template matching (the filename here is an assumption;
# the original snippet started after img/gray were already loaded)
img = cv2.imread('building.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

template = cv2.imread('building2.jpg', 0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.5
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
    cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (255, 0, 255), 2)
cv2.imshow('Detected', img)

# to separate the car from the background with GrabCut
img = cv2.imread('car.jpg')
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
rect = (0, 0, 480, 640)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
# pixels labelled definite/probable background (0 or 2) -> 0, foreground -> 1
mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
img = img * mask2[:, :, np.newaxis]
plt.imshow(img)
plt.colorbar()
plt.show()

img2 = img[:]  # np.newaxis is an alias for None, not a callable; a plain slice keeps the masked image
cv2.imshow('img', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
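# A small illustration of the masking/broadcasting idiom used above (synthetic
# values, independent of the images in the snippet): an (H, W, 3) image multiplied
# by an (H, W, 1) mask zeroes the background pixels in every channel.
demo_img = np.full((2, 2, 3), 255, dtype=np.uint8)
demo_mask = np.array([[1, 0],
                      [0, 1]], dtype=np.uint8)
masked = demo_img * demo_mask[:, :, np.newaxis]   # shape (2, 2, 3); zeros where mask == 0
print(masked[:, :, 0])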