def set_contents_and_styles(self, contents, styles):
    """Propagates feature maps and Gram matrices to all TileWorkers."""
    # Copy each feature map and Gram matrix into shared memory once, then
    # broadcast the same handles to every worker.
    content_shms, style_shms = [], []
    for content in contents:
        features_shm = {layer: SharedNDArray.copy(content.features[layer])
                        for layer in content.features}
        content_shms.append(ContentData(features_shm))
    for style in styles:
        grams_shm = {layer: SharedNDArray.copy(style.grams[layer])
                     for layer in style.grams}
        style_shms.append(StyleData(grams_shm))
    for worker in self.workers:
        worker.req_q.put(SetContentsAndStyles(content_shms, style_shms))
    # Wait for every worker to acknowledge before unlinking, since the
    # workers copy out of the shared arrays on receipt.
    for _ in self.workers:
        self.resp_q.get()
    for shms in content_shms:
        _ = [shm.unlink() for shm in shms.features.values()]
    for shms in style_shms:
        _ = [shm.unlink() for shm in shms.grams.values()]
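# The copy -> broadcast -> sync -> unlink choreography above is easier to see
# in isolation. A minimal sketch of the SharedNDArray lifecycle the pool
# relies on (the shapes and values here are arbitrary):
import numpy as np

from shared_ndarray import SharedNDArray

src = np.arange(12, dtype=np.float32).reshape(3, 4)
shm = SharedNDArray.copy(src)            # shared copy another process can map
assert np.array_equal(shm.array, src)    # .array is a NumPy view of the segment
shm.unlink()                             # only safe once all readers have copied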
def process_one_request(self):
    """Receives one request from the master process and acts on it."""
    req = self.req_q.get()
    logger.debug('Started request %s', req)
    layers = []

    if isinstance(req, FeatureMapRequest):
        # Select the requested layers in the model's (reversed) layer order.
        for layer in reversed(self.model.layers()):
            if layer in req.layers:
                layers.append(layer)
        features = self.model.eval_features_tile(req.img.array, layers)
        req.img.unlink()
        features_shm = {layer: SharedNDArray.copy(features[layer])
                        for layer in features}
        self.resp_q.put(FeatureMapResponse(req.resp, features_shm))

    if isinstance(req, SCGradRequest):
        for layer in reversed(self.model.layers()):
            if layer in req.content_layers + req.style_layers + req.dd_layers:
                layers.append(layer)
        # Apply the jitter roll, evaluate the tile gradient, then undo the roll.
        self.model.roll(req.roll, jitter_scale=1)
        loss, grad = self.model.eval_sc_grad_tile(
            req.img.array, req.start, layers, req.content_layers,
            req.style_layers, req.dd_layers, req.layer_weights,
            req.content_weight, req.style_weight, req.dd_weight)
        req.img.unlink()
        self.model.roll(-req.roll, jitter_scale=1)
        self.resp_q.put(SCGradResponse(req.resp, loss, SharedNDArray.copy(grad)))

    if isinstance(req, SetContentsAndStyles):
        # Copy the shared feature maps and Gram matrices into process-local
        # memory; the master unlinks the shared arrays after all workers ack.
        self.model.contents, self.model.styles = [], []
        for content in req.contents:
            features = {layer: content.features[layer].array.copy()
                        for layer in content.features}
            self.model.contents.append(ContentData(features))
        for style in req.styles:
            grams = {layer: style.grams[layer].array.copy()
                     for layer in style.grams}
            self.model.styles.append(StyleData(grams))
        self.resp_q.put(())

    if isinstance(req, SetThreadCount):
        set_thread_count(req.threads)

    logger.debug('Finished request %s', req)
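# The message types this handler dispatches on are plain value containers. A
# hedged sketch of what such definitions might look like, using namedtuples;
# the field lists are inferred from the usage above and may differ from the
# original project's definitions:
from collections import namedtuple

FeatureMapRequest = namedtuple('FeatureMapRequest', 'resp img layers')
FeatureMapResponse = namedtuple('FeatureMapResponse', 'resp features')
SCGradRequest = namedtuple(
    'SCGradRequest',
    'resp img roll start content_layers style_layers dd_layers '
    'layer_weights content_weight style_weight dd_weight')
SCGradResponse = namedtuple('SCGradResponse', 'resp loss grad')
SetContentsAndStyles = namedtuple('SetContentsAndStyles', 'contents styles')
SetThreadCount = namedtuple('SetThreadCount', 'threads')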
def eval_sc_grad(self, pool, roll, content_layers, style_layers, dd_layers,
                 layer_weights, content_weight, style_weight, dd_weight,
                 tile_size):
    """Evaluates the summed style and content gradients."""
    loss = 0
    grad = np.zeros_like(self.img)
    img_size = np.array(self.img.shape[-2:])
    ntiles = (img_size - 1) // tile_size + 1
    tile_size = img_size // ntiles

    # Dispatch one gradient request per tile, stretching the last tile in
    # each dimension to the image edge.
    for y in range(ntiles[0]):
        for x in range(ntiles[1]):
            xy = np.array([y, x])
            start = xy * tile_size
            end = start + tile_size
            if y == ntiles[0] - 1:
                end[0] = img_size[0]
            if x == ntiles[1] - 1:
                end[1] = img_size[1]
            tile = self.img[:, start[0]:end[0], start[1]:end[1]]
            pool.ensure_healthy()
            pool.request(SCGradRequest(
                (start, end), SharedNDArray.copy(tile), roll, start,
                content_layers, style_layers, dd_layers, layer_weights,
                content_weight, style_weight, dd_weight))

    pool.reset_next_worker()
    # Collect the per-tile losses and gradients and stitch them back together.
    for _ in range(np.prod(ntiles)):
        (start, end), loss_tile, grad_tile = pool.resp_q.get()
        loss += loss_tile
        grad[:, start[0]:end[0], start[1]:end[1]] = grad_tile.array
        grad_tile.unlink()

    return loss, grad
def eval_features_once(self, pool, layers, tile_size=512):
    """Computes the set of feature maps for an image."""
    img_size = np.array(self.img.shape[-2:])
    ntiles = (img_size - 1) // tile_size + 1
    tile_size = img_size // ntiles
    if np.prod(ntiles) > 1:
        print_('Using %dx%d tiles of size %dx%d.' %
               (ntiles[1], ntiles[0], tile_size[1], tile_size[0]))

    # Allocate one full-size feature map per layer, scaled to the layer's
    # spatial resolution.
    features = {}
    for layer in layers:
        scale, channels = self.layer_info(layer)
        shape = (channels,) + tuple(np.int32(np.ceil(img_size / scale)))
        features[layer] = np.zeros(shape, dtype=np.float32)

    for y in range(ntiles[0]):
        for x in range(ntiles[1]):
            xy = np.array([y, x])
            start = xy * tile_size
            end = start + tile_size
            if y == ntiles[0] - 1:
                end[0] = img_size[0]
            if x == ntiles[1] - 1:
                end[1] = img_size[1]
            tile = self.img[:, start[0]:end[0], start[1]:end[1]]
            pool.ensure_healthy()
            pool.request(FeatureMapRequest(start, SharedNDArray.copy(tile), layers))

    pool.reset_next_worker()
    # Paste each returned tile into the right region of each feature map.
    for _ in range(np.prod(ntiles)):
        start, feats_tile = pool.resp_q.get()
        for layer, feat in feats_tile.items():
            scale, _ = self.layer_info(layer)
            start_f = start // scale
            end_f = start_f + np.array(feat.array.shape[-2:])
            features[layer][:, start_f[0]:end_f[0],
                            start_f[1]:end_f[1]] = feat.array
            feat.unlink()

    return features
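# Both tiled passes above pick their grid with the same rounding arithmetic.
# A worked sketch with a hypothetical 720x1280 image and the default
# tile_size of 512:
import numpy as np

img_size = np.array([720, 1280])           # (height, width)
ntiles = (img_size - 1) // 512 + 1         # ceil division -> [2, 3] tiles
tile_size = img_size // ntiles             # actual tile size -> [360, 426]
# The last tile in each dimension is stretched to the image edge, so the
# final column covers 1280 - 2 * 426 = 428 pixels rather than 426.
print(ntiles, tile_size)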
import multiprocessing as mp
import time

import numpy as np

from shared_ndarray import SharedNDArray


def func(shm):
    # Reader: repeatedly prints the shared array's current contents.
    while True:
        print(shm.array)
        time.sleep(0.5)


def func2(shm):
    # Writer: overwrites the shared array and stamps it with a counter.
    i = 0
    while True:
        i += 1
        shm.array[:] = np.ones((4, 4))[:]
        shm.array[0, 0] = i
        print(i)
        time.sleep(1)


try:
    shm = SharedNDArray((4, 4))
    p = mp.Process(target=func, args=(shm,))
    p2 = mp.Process(target=func2, args=(shm,))
    p.start()
    p2.start()
    p.join()
    p2.join()
finally:
    shm.unlink()
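# Note the try/finally around the demo: shared-memory segments are not freed
# automatically when the processes exit, so without the unlink() in finally an
# interrupted run would leak the segment (on Linux it would linger in /dev/shm
# until removed by hand or a reboot).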
def AssembleOneBatch(data, modelSpecs, forRefState=False, bounds=None,
                     floatType=theano.config.floatX, bUseSharedMemory=False):
    if not data:
        print 'WARNING: the list of data is empty'
        return None

    numSeqs = len(data)
    seqLens = [d['seqLen'] for d in data]
    names = [d['name'] for d in data]

    ## use maxSeqLen and minSeqLen for sequential features
    ## we do not crop sequential features at this step since the theano deep
    ## model will do so after the 1D convolution operation
    maxSeqLen = max(seqLens)
    minSeqLen = min(seqLens)
    #print 'maxSeqLen= ', maxSeqLen, 'minSeqLen= ', minSeqLen

    numSeqFeatures = FeatureUtils.DetermineNumSeqFeatures(data[0]['seqFeatures'])
    X1d = np.zeros(shape=(numSeqs, maxSeqLen, numSeqFeatures), dtype=floatType)

    numMatrixFeatures = FeatureUtils.DetermineNumMatrixFeatures(
        data[0]['matrixFeatures']) + FeatureUtils.DetermineNumMatrixFeatures(
        data[0]['matrixFeatures_nomean'])

    ## we use maxMatrixSize and minMatrixSize for pairwise features
    ## we crop pairwise features at this step to save memory and computational time
    minMatrixSize, maxMatrixSize = CalcMinMaxMatrixSize(bounds, seqLens)

    if bUseSharedMemory:
        shmX2d = SharedNDArray(
            (numSeqs, maxMatrixSize, maxMatrixSize, numMatrixFeatures),
            dtype=floatType,
            name='/RaptorX-' + str(os.getppid()) + '-X2d-' + randomString(6))
        X2d = shmX2d.array
        X2d[:] = 0
    else:
        X2d = np.zeros(shape=(numSeqs, maxMatrixSize, maxMatrixSize,
                              numMatrixFeatures), dtype=floatType)

    X1dem = None
    if data[0].has_key('embedFeatures'):
        numEmbedFeatures = data[0]['embedFeatures'].shape[1]
        X1dem = np.zeros(shape=(numSeqs, maxSeqLen, numEmbedFeatures),
                         dtype=floatType)

    ## Y shall be a list of 2D or 3D matrices, each for one response
    Y = []
    if data[0].has_key('atomLabelMatrix'):
        for response in modelSpecs['responses']:
            labelName, labelType, _ = ParseResponse(response)
            dataType = np.int16
            if not config.IsDiscreteLabel(labelType):
                dataType = floatType
            rValDims = GetResponseValueDims(response)
            if rValDims == 1:
                y = np.zeros(shape=(numSeqs, maxMatrixSize, maxMatrixSize),
                             dtype=dataType)
                Y.append(y)
            else:
                y = np.zeros(shape=(numSeqs, maxMatrixSize, maxMatrixSize,
                                    rValDims), dtype=dataType)
                Y.append(y)

    ## when Y is empty, weight is useless. So when Y is None, weight shall also be None
    weightMatrix = []
    if bool(Y) and config.UseSampleWeight(modelSpecs):
        weightMatrix = [np.zeros(shape=(numSeqs, maxMatrixSize, maxMatrixSize),
                                 dtype=floatType)] * len(modelSpecs['responses'])

    ## masks; we do not use shared ndarrays for them since they are small
    M1d = np.zeros(shape=(numSeqs, maxSeqLen - minSeqLen), dtype=np.int8)
    M2d = np.zeros(shape=(numSeqs, maxMatrixSize - minMatrixSize, maxMatrixSize),
                   dtype=np.int8)

    if bounds is not None:
        boxes = bounds
    else:
        boxes = [None] * len(data)

    for j, d, box in zip(range(len(data)), data, boxes):
        seqLen = d['seqLen']

        ## posInSeq, posInX and posInY are the starting positions of one protein
        ## in the final output tensor
        posInSeq = -seqLen
        ## here X and Y refer to the x-axis and y-axis
        if box is not None:
            top, left, bottom, right = box
            posInX = -(bottom - top)
            posInY = -(right - left)
        else:
            posInX = -seqLen
            posInY = -seqLen

        if forRefState:
            ## this code needs reexamination; it may not be correct when
            ## d['seqFeatures']/d['matrixFeatures'] is represented as a list of
            ## arrays instead of a single array
            X1d[j, posInSeq:, :] = np.array(
                [modelSpecs['seqFeatures_expected']] * seqLen).reshape((seqLen, -1))
            tmp = [modelSpecs['matrixFeatures_expected']] * (seqLen * seqLen)
            tmp2 = np.array(tmp).reshape((seqLen, seqLen, -1))
            tmp3 = np.concatenate((tmp2, d['matrixFeatures_nomean']), axis=2)
            if box is not None:
                X2d[j, posInX:, posInY:, :] = tmp3[top:bottom, left:right, ]
            else:
                X2d[j, posInX:, posInY:, :] = tmp3
        else:
            if isinstance(d['seqFeatures'], np.ndarray):
                X1d[j, posInSeq:, :] = d['seqFeatures']
            else:
                startPos = 0
                for f in d['seqFeatures']:
                    if len(f.shape) == 1:
                        X1d[j, posInSeq:, startPos:startPos + 1] = f[:, np.newaxis]
                        startPos += 1
                    elif len(f.shape) == 2:
                        X1d[j, posInSeq:, startPos:startPos + f.shape[1]] = f
                        startPos = startPos + f.shape[1]
                    else:
                        print 'wrong shape in sequential feature: ', f.shape
                        exit(1)

            # add 2D features in matrixFeatures to holder starting from the start position
            # holder is a 3D array and start is the starting position in the 3rd dimension
            def Add2DFeatures(matrixFeatures, holder, start):
                if isinstance(matrixFeatures, np.ndarray):
                    features = [matrixFeatures]
                else:
                    features = matrixFeatures

                startPos = start
                for f in features:
                    if len(f.shape) == 2:
                        endPos = startPos + 1
                        if box is None:
                            holder[:, :, startPos:endPos] = f[:, :, np.newaxis]
                        else:
                            holder[:, :, startPos:endPos] = f[top:bottom, left:right,
                                                              np.newaxis]
                    elif len(f.shape) == 3:
                        endPos = startPos + f.shape[2]
                        if box is None:
                            holder[:, :, startPos:endPos] = f
                        else:
                            holder[:, :, startPos:endPos] = f[top:bottom, left:right, :]
                    else:
                        print 'wrong shape in matrixFeatures: ', f.shape
                        exit(1)
                    startPos = endPos

                return endPos

            end = Add2DFeatures(d['matrixFeatures'], X2d[j, posInX:, posInY:, :], 0)
            Add2DFeatures(d['matrixFeatures_nomean'], X2d[j, posInX:, posInY:, :], end)

        M1d[j, posInSeq:].fill(1)
        M2d[j, posInX:, posInY:].fill(1)

        if X1dem is not None:
            ## embed features are always represented as a single array, so this
            ## code shall be correct
            if forRefState:
                X1dem[j, posInSeq:, :] = np.array(
                    [modelSpecs['embedFeatures_expected']] * seqLen).reshape((seqLen, -1))
            else:
                X1dem[j, posInSeq:, :] = d['embedFeatures']

        for y, response in zip(Y, modelSpecs['responses']):
            if box is not None:
                tmp = d['atomLabelMatrix'][response][top:bottom, left:right]
            else:
                tmp = d['atomLabelMatrix'][response]
            if len(y.shape) == 3:
                y[j, posInX:, posInY:] = tmp
            else:
                y[j, posInX:, posInY:, ] = tmp

        if bool(weightMatrix):
            if d.has_key('labelWeightMatrix'):
                labelWeightMatrix = d['labelWeightMatrix']
            else:
                labelWeightMatrix = LabelUtils.CalcLabelWeightMatrix(
                    d['atomLabelMatrix'], modelSpecs, floatType=floatType)
            for w, response in zip(weightMatrix, modelSpecs['responses']):
                if box is not None:
                    w[j, posInX:, posInY:] = labelWeightMatrix[response][top:bottom,
                                                                         left:right]
                else:
                    w[j, posInX:, posInY:] = labelWeightMatrix[response]

    if bUseSharedMemory:
        onebatch = [X1d, shmX2d, M1d, M2d]
    else:
        onebatch = [X1d, X2d, M1d, M2d]
    if X1dem is not None:
        onebatch.append(X1dem)
    onebatch.extend(Y)
    onebatch.extend(weightMatrix)

    return onebatch, names
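# The negative start indices above (posInSeq = -seqLen, posInX, posInY) are
# what implement the padding: writing from index -seqLen right-aligns each
# variable-length protein in the padded batch tensor, leaving the leading
# entries zero for the masks M1d/M2d to exclude. A toy sketch of the trick:
import numpy as np

maxSeqLen = 6
batch = np.zeros((2, maxSeqLen), dtype=np.int8)
seq = np.array([1, 2, 3], dtype=np.int8)   # seqLen = 3
batch[0, -len(seq):] = seq                 # -> [0, 0, 0, 1, 2, 3]
print(batch[0])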
        else:
            if is_detected.value == 1:
                is_detected.value = 0
            else:
                pass
        print(is_detected.value)


def recognition(frame):
    while True:
        pass


if __name__ == "__main__":
    try:
        frame = SharedNDArray((480, 640, 3))
        view_running = Value('i', 1)
        face_tracking_running = Value('i', 1)
        is_detected = Value('i', 0)
        view = Process(target=view, args=(frame, view_running,))
        face_tracking = Process(target=face_tracking,
                                args=(frame, face_tracking_running, is_detected,))
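# The Value('i', ...) flags threaded through these processes are the standard
# multiprocessing pattern for a cross-process boolean; a self-contained sketch
# of the same mechanism (the names here are illustrative):
import time
from multiprocessing import Process, Value


def worker(flag):
    while flag.value:        # spins until the parent clears the flag
        time.sleep(0.1)


if __name__ == '__main__':
    running = Value('i', 1)
    p = Process(target=worker, args=(running,))
    p.start()
    time.sleep(0.5)
    running.value = 0        # signal the worker to stop
    p.join()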
import json
from multiprocessing import Value

import pigpio

import expression
import img2encoding
from shared_ndarray import SharedNDArray

if __name__ == "__main__":
    try:
        pi = pigpio.pi()
        img2encoding.img2encoding()

        with open("face/face_list.json", "r") as f:
            face_list = json.load(f)
        known_face_names = list(face_list.keys())  # list
        known_face_names.append("unknown")

        HEIGHT = Value('i', 320)
        WIDTH = Value('i', 480)
        frame = SharedNDArray((HEIGHT.value, WIDTH.value, 3))
        face_location = SharedNDArray((1, 4))
        emotion = SharedNDArray((1, 7))
        emotion_total = SharedNDArray((1, 7))
        emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy",
                        4: "Neutral", 5: "Sad", 6: "Surprised"}

        view_running = Value('i', 1)
        face_tracking_running = Value('i', 1)