def parse(self, filename):
    """Parse a training-image file into a list of Face samples.

    The file is assumed to contain ``self.file_len(filename) // NUM_ROWS``
    samples, each exactly NUM_ROWS consecutive text lines.

    Args:
        filename: path to the training-images text file.

    Returns:
        list of Face objects, one per sample, with features processed.
    """
    self.sample_size = self.file_len(filename) // NUM_ROWS
    retlist = []
    # FIX: the original reused loop variable `i` for both the sample loop
    # and the row loop, and never closed the file on error; use distinct
    # names and a context manager.
    with open(filename, "r") as f:
        for _sample in xrange(0, self.sample_size):
            curr_digit = Face()
            # The next NUM_ROWS lines form one sample's raster.
            curr_digit.number = [f.readline() for _row in xrange(NUM_ROWS)]
            curr_digit.processFeatures()
            retlist.append(curr_digit)
    return retlist
def iterateNetThroughPath(path, trans, speed, network, center, win, updateTime):
    """Animate the network-driven face along a path.

    For each (position, network-input) pair: scatter-plot the position,
    feed the input through the network, draw the resulting face, print
    the input/output, and pause ``updateTime`` seconds.

    Args:
        path: sequence of (x, y) positions, parallel to ``trans``.
        trans: sequence of network input vectors.
        speed: passed through to printNetInOut for display.
        network: object with activate(input) -> (smile, pupilx, pupily, eyebrow).
        center: face center passed to Face.drawFace.
        win: drawing window (must support update()).
        updateTime: delay between frames, in seconds.
    """
    # Iterate the two parallel sequences in lockstep instead of indexing.
    for position, net_input in zip(path, trans):
        plt.scatter(position[0], position[1], label="Posisjon")
        plt.draw()
        faceParam = network.activate(net_input)
        smileDeg, pupilx, pupily, eyebrow = faceParam
        Face.drawFace(center, smileDeg, pupilx, pupily, 0, eyebrow, win)
        win.update()
        printNetInOut(net_input, faceParam, position, speed)
        time.sleep(updateTime)
# NOTE(review): this span is a fragment recovered from whitespace-collapsed
# source. It appears to be the tail of a Regenerate(filename, genfn,
# definition)-style function followed by module-level driver code; the
# leading `def` line and the body of the final bare `except:` are not
# visible here, so the snippet is syntactically incomplete as shown.
inText = contents(filename)
tempname = "HFacer.tmp"
out = open(tempname, "w")  # opens trainingimages? no — the temp output file
hfile = open(filename)
CopyWithInsertion(hfile, out, genfn, definition)
out.close()
hfile.close()
outText = contents(tempname)
if inText == outText:
    # Regenerated output is identical: discard the temp file and leave the
    # original untouched (preserves timestamps, avoids needless rebuilds).
    os.unlink(tempname)
else:
    # Output changed: replace the original file with the regenerated one.
    os.unlink(filename)
    os.rename(tempname, filename)

# Module-level driver: load the Scintilla interface definition and
# regenerate all derived wrapper/enum/doc files from it.
f = Face.Face()
try:
    f.ReadFromFile("Scintilla.iface")
    findEnumValues(f)
    with open("ScintillaWrapperGenerated.cpp", 'w') as cpp:
        writeCppFile(f, cpp)
    Regenerate("ScintillaWrapper.h", writeHFile, f)
    Regenerate("ScintillaPython.cpp", writeBoostWrapFile, f)
    Regenerate("Enums.h", writeEnumsHFile, f)
    Regenerate("EnumsWrapper.cpp", writeEnumsWrapperFile, f)
    Regenerate(r"..\..\docs\source\scintilla.rst", writeScintillaDoc, f)
    Regenerate(r"..\..\docs\source\enums.rst", writeScintillaEnums, f)
    #Regenerate("SciLexer.h", printLexHFile, f)
    #print("Maximum ID is %s" % max([x for x in f.values if int(x) < 3000]))
# NOTE(review): the handler body of this bare `except:` lies outside the
# visible span — the fragment ends here.
except:
def RegenerateAll(root):
    """Regenerate the lexer C header from the interface definition under *root*."""
    iface = Face.Face()
    iface.ReadFromFile(root + "include/SciLexer.iface")
    Regenerate(root + "include/SciLexer.h", "/* ", printLexHFile(iface))
def understoodType(t):
    """Return True if *t* is a known simple type or a Face enumeration."""
    if t in understoodTypes:
        return True
    return Face.IsEnumeration(t)
def main(args):
    """Detect, recognize and track faces in a video stream.

    Loads an MTCNN face detector and a FaceNet embedding model, classifies
    each detected face with a pickled classifier, tracks faces across
    frames by coordinate proximity, and displays the annotated video until
    the user presses 'q'.

    NOTE(review): recovered from whitespace-collapsed source; the block
    nesting below was reconstructed from context — verify exact branch
    boundaries against the original file.
    """
    videoLink = args.video_link
    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        # Cap GPU memory so other processes can share the device.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            # MTCNN detector stages (P-Net, R-Net, O-Net).
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
            minsize = 20  # minimum size of face
            threshold = [0.6, 0.7, 0.7]  # three steps's threshold
            factor = 0.709  # scale factor
            margin = 44  # NOTE(review): assigned but never used below
            frame_interval = args.frame_interval
            batch_size = 1000  # NOTE(review): assigned but never used below
            image_size = 182
            input_image_size = 160
            max_age = args.max_age
            print('Loading feature extraction model')
            modeldir = args.modeldir
            debug = args.debug
            print("Debug: ", debug)
            # args.debug arrives as a string; normalize to a real bool.
            if debug == 'True':
                debug = True
            else:
                debug = False
            if debug:
                print("videoLink: ", args.video_link)
            facenet.load_model(modeldir)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]
            classifier_filename = args.classifier_filename
            classifier_filename_exp = os.path.expanduser(classifier_filename)
            with open(classifier_filename_exp, 'rb') as infile:
                (model, class_names) = pickle.load(infile)
                print('load classifier file-> %s' % classifier_filename_exp)
            # video_capture = cv2.VideoCapture(0) #webcam
            video_capture = cv2.VideoCapture(args.video_link)
            c = 0    # frame counter
            fid = 0  # next face-track id
            faces = []  # currently tracked faces
            target_distance = args.target_distance
            print('Start Recognition!')
            prevTime = 0
            while True:
                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (0, 0), fx=0.5,
                                   fy=0.5)  # resize frame (optional)
                curTime = time.time() + 1  # calc fps
                # NOTE(review): the "+ 1" skews the FPS figure — confirm
                # this is intentional (possibly a div-by-zero dodge).
                timeF = frame_interval
                new = True
                show = False
                for i in faces:
                    i.age_one()
                if (c % timeF == 0):
                    # Full pipeline frame: detect, embed, classify, track.
                    if frame.ndim == 2:
                        frame = facenet.to_rgb(frame)
                    frame = frame[:, :, 0:3]
                    bounding_boxes, _ = detect_face.detect_face(
                        frame, minsize, pnet, rnet, onet, threshold, factor)
                    nrof_faces = bounding_boxes.shape[0]
                    if nrof_faces > 0:
                        det = bounding_boxes[:, 0:4]
                        img_size = np.asarray(frame.shape)[0:2]
                        cropped = []
                        scaled = []
                        scaled_reshape = []
                        bb = np.zeros((nrof_faces, 4), dtype=np.int32)
                        for i in range(nrof_faces):
                            emb_array = np.zeros((1, embedding_size))
                            bb[i][0] = det[i][0]
                            bb[i][1] = det[i][1]
                            bb[i][2] = det[i][2]
                            bb[i][3] = det[i][3]
                            # inner exception
                            # Skip boxes that touch or leave the frame.
                            if bb[i][0] <= 0 or bb[i][1] <= 0 or bb[i][
                                    2] >= len(
                                        frame[0]) or bb[i][3] >= len(frame):
                                if debug:
                                    print('face is inner of range!')
                                continue
                            cropped.append(frame[bb[i][1]:bb[i][3],
                                                 bb[i][0]:bb[i][2], :])
                            try:
                                cropped[i] = facenet.flip(cropped[i], False)
                            except:
                                # NOTE(review): after any earlier `continue`
                                # the list index i can drift out of step with
                                # cropped/scaled — confirm intended.
                                continue
                            if debug:
                                print('Processing Status: PROCESSING FRAME')
                            scaled.append(
                                misc.imresize(cropped[i],
                                              (image_size, image_size),
                                              interp='bilinear'))
                            scaled[i] = cv2.resize(
                                scaled[i],
                                (input_image_size, input_image_size),
                                interpolation=cv2.INTER_CUBIC)
                            scaled[i] = facenet.prewhiten(scaled[i])
                            scaled_reshape.append(scaled[i].reshape(
                                -1, input_image_size, input_image_size, 3))
                            feed_dict = {
                                images_placeholder: scaled_reshape[i],
                                phase_train_placeholder: False
                            }
                            emb_array[0, :] = sess.run(embeddings,
                                                       feed_dict=feed_dict)
                            predictions = model.predict_proba(emb_array)
                            best_class_indices = np.argmax(predictions,
                                                           axis=1)
                            best_class_probabilities = predictions[
                                np.arange(len(best_class_indices)),
                                best_class_indices]
                            # plot result idx under box
                            text_x = bb[i][0]
                            text_y = bb[i][3] + 20
                            if debug:
                                print('frame_interval: ', frame_interval)
                            # track faces
                            result_names = class_names[best_class_indices[0]]
                            for k in faces:
                                # print(best_class_probabilities[0])
                                # Match detection to an existing track by
                                # coordinate proximity.
                                if abs(bb[i][0] - k.getX()) <= target_distance \
                                        and abs(bb[i][1] - k.getY()) \
                                        <= target_distance and k.getDone() is False:
                                    if debug:
                                        print(k.getAge(), 'X Diff: ',
                                              abs(bb[i][0] - k.getX()),
                                              'Y Diff: ',
                                              abs(bb[i][1] - k.getY()))
                                    new = False
                                    if best_class_probabilities[0] > 0.20:
                                        # Confident enough: refresh the track.
                                        k.updateCoords(bb[i][0], bb[i][1])
                                        k.updateConfidence(
                                            best_class_probabilities[0])
                                        result_names = class_names[
                                            best_class_indices[0]]
                                        k.updateStaffID(
                                            result_names.split(' ')[0])
                                        k.updateName(
                                            result_names.split(' ')[1])
                                    if k.getAge() > 1:
                                        show = True
                                    color = k.getRGB()
                                    counter = Counter(k.getName())
                                    most_common = counter.most_common()
                                    if debug:
                                        print('Show: ', show)
                                        print(most_common)
                                    if show:
                                        if len(most_common) >= 2:
                                            f_n, f_v = most_common[0]
                                            s_n, s_v = most_common[1]
                                            if f_n != 'Unk':
                                                name_to_show = f_n
                                                # name_to_show = name_mode
                                            else:
                                                name_to_show = s_n
                                        if len(most_common) == 1:
                                            f_n, f_v = most_common[0]
                                            name_to_show = f_n
                                        # print(name_to_show)
                            if new:
                                # No nearby track matched: start a new one.
                                f = Face.MyFace(fid, bb[i][0], bb[i][1],
                                                max_age)
                                f.updateConfidence(
                                    best_class_probabilities[0])
                                result_names = class_names[
                                    best_class_indices[0]]
                                f.updateStaffID(result_names.split(' ')[0])
                                name = result_names.split(' ')[1]
                                f.updateName(name)
                                color = f.getRGB()
                                faces.append(f)
                                fid += 1
                                name_to_show = ''
                            # NOTE(review): if no branch above assigned
                            # name_to_show/color, the lines below raise
                            # NameError — confirm against original flow.
                            cv2.rectangle(frame, (bb[i][0], bb[i][1]),
                                          (bb[i][2], bb[i][3]), color,
                                          2)  # boxing face
                            if name_to_show == 'Unk':
                                name_to_show = 'Unknown'
                            if debug:
                                print('Detected As: ', name_to_show)
                            cv2.putText(frame, name_to_show,
                                        (text_x, text_y),
                                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                        color, thickness=1, lineType=2)
                    else:
                        if debug:
                            print('Unable to align')
                else:
                    # In-between frame: detect and redraw existing tracks,
                    # but skip the embedding/classification step.
                    if debug:
                        print('Processing Status: NOT PROCESSING FRAME')
                    if frame.ndim == 2:
                        frame = facenet.to_rgb(frame)
                    frame = frame[:, :, 0:3]
                    bounding_boxes, _ = detect_face.detect_face(
                        frame, minsize, pnet, rnet, onet, threshold, factor)
                    nrof_faces = bounding_boxes.shape[0]
                    if nrof_faces > 0:
                        det = bounding_boxes[:, 0:4]
                        img_size = np.asarray(frame.shape)[0:2]
                        cropped = []
                        scaled = []
                        scaled_reshape = []
                        bb = np.zeros((nrof_faces, 4), dtype=np.int32)
                        for i in range(nrof_faces):
                            emb_array = np.zeros((1, embedding_size))
                            bb[i][0] = det[i][0]
                            bb[i][1] = det[i][1]
                            bb[i][2] = det[i][2]
                            bb[i][3] = det[i][3]
                            # inner exception
                            if bb[i][0] <= 0 or bb[i][1] <= 0 or bb[i][
                                    2] >= len(
                                        frame[0]) or bb[i][3] >= len(frame):
                                if debug:
                                    print('face is inner of range!')
                                continue
                            for k in faces:
                                # print(best_class_probabilities[0])
                                if abs(bb[i][0] - k.getX()) <= target_distance \
                                        and abs(bb[i][1] - k.getY()) \
                                        <= target_distance and k.getDone() is False:
                                    if debug:
                                        print(k.getAge(), 'X Diff: ',
                                              abs(bb[i][0] - k.getX()),
                                              'Y Diff: ',
                                              abs(bb[i][1] - k.getY()))
                                    if k.getAge() > 1:
                                        show = True
                                    color = k.getRGB()
                                    counter = Counter(k.getName())
                                    most_common = counter.most_common()
                                    text_x = bb[i][0]
                                    text_y = bb[i][3] + 20
                                    if debug:
                                        print('Show: ', show)
                                        print(most_common)
                                    if show:
                                        if len(most_common) >= 2:
                                            f_n, f_v = most_common[0]
                                            s_n, s_v = most_common[1]
                                            if f_n != 'Unk':
                                                name_to_show = f_n
                                                # name_to_show = name_mode
                                            else:
                                                name_to_show = s_n
                                        elif len(most_common) == 1:
                                            f_n, f_v = most_common[0]
                                            name_to_show = f_n
                                        else:
                                            name_to_show = 'Unknown'
                                    cv2.rectangle(frame,
                                                  (bb[i][0], bb[i][1]),
                                                  (bb[i][2], bb[i][3]),
                                                  color,
                                                  2)  # boxing face
                                    if name_to_show == 'Unk':
                                        name_to_show = 'Unknown'
                                    if debug:
                                        print('Detected As: ', name_to_show)
                                    cv2.putText(
                                        frame, name_to_show,
                                        (text_x, text_y),
                                        cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                        color, thickness=1, lineType=2)
                # FPS overlay for the displayed frame.
                sec = curTime - prevTime
                prevTime = curTime
                fps = 1 / (sec)
                # NOTE(review): `str` shadows the builtin inside this loop.
                str = 'FPS: %2.3f' % fps
                text_fps_x = len(frame[0]) - 150
                text_fps_y = 20
                cv2.putText(frame, str, (text_fps_x, text_fps_y),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0),
                            thickness=1, lineType=2)
                c += 1
                if frame.shape[0] < 1000:
                    frame = cv2.resize(frame, (0, 0), fx=1.5, fy=1.5)
                cv2.imshow('Video', frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
            video_capture.release()
            # #video writer
            # out.release()
            cv2.destroyAllWindows()
class LocalPrefixDiscovery:
    """Periodically discovers the local NDN routable prefix.

    Subscribers register a callback and are notified with
    ``callback(oldPrefix, newPrefix)`` whenever the prefix announced under
    /local/ndn/prefix changes. Discovery polls every ``periodicity``
    seconds on a background PyNDN event-loop thread; it starts with the
    first subscriber and stops with the last.
    """
    # private:
    __slots__ = [
        "_subscribers", "_currentPrefix", "_needStopDiscovery", "_scheduler",
        "_timeouts", "_face", "_eventLoop", "_eventLoopThread", "_periodicity"
    ]
    # FIX: __slots__ previously declared "_evenLoop" (typo) while __init__
    # assigned self._eventLoop, which raises AttributeError on a slotted
    # class.

    # public:
    def __init__(self, periodicity=30):  # 30 seconds
        self._periodicity = periodicity
        self._subscribers = {}  # tag -> callback(oldPrefix, newPrefix)
        self._currentPrefix = Name()
        self._needStopDiscovery = True
        self._scheduler = Scheduler()
        self._scheduler.start()
        self._timeouts = 0
        self._face = Face()
        self._eventLoop = Face.EventLoop(self._face)
        # Initialized here so _stop() is safe even if _start() never ran.
        self._eventLoopThread = None

    def subscribe(self, tag, callback):
        """Register *callback* under *tag*; the first subscriber starts discovery."""
        self._subscribers[tag] = callback
        if len(self._subscribers) == 1:
            self._start()

    def unsubscribe(self, tag):
        """Remove the subscriber for *tag*; stops discovery when none remain."""
        del self._subscribers[tag]
        if len(self._subscribers) == 0:
            self._stop()

    def shutdown(self):
        """Stop discovery and shut the scheduler down."""
        self._stop()
        self._scheduler.shutdown()

    #private:
    def _start(self):
        self._needStopDiscovery = False
        self._eventLoopThread = threading.Thread(
            target=self._pyndn_loop_ignoring_errors)
        self._eventLoopThread.start()
        # First probe one second from now; later probes reschedule themselves.
        nextDiscovery = datetime.datetime.now() + datetime.timedelta(seconds=1)
        self._scheduler.add_date_job(self._requestLocalPrefix, nextDiscovery)

    def _stop(self):
        self._needStopDiscovery = True
        self._eventLoop.stop()
        # FIX: guard the join — shutdown() may be called before _start().
        if self._eventLoopThread is not None:
            self._eventLoopThread.join()
            self._eventLoopThread = None

    def _pyndn_loop_ignoring_errors(self):
        # Best-effort event loop: on any error, reconnect after a backoff
        # unless we are shutting down.
        while not self._needStopDiscovery:
            try:
                self._eventLoop.run()
            except Exception:
                if not self._needStopDiscovery:
                    self._face.disconnect()
                    time.sleep(self._periodicity)
                    self._face.connect()

    def _requestLocalPrefix(self):
        self._timeouts = 0
        self._face.expressInterestForLatest(Name("/local/ndn/prefix"),
                                            self._onLocalPrefix,
                                            self._onTimeout)

    def _onLocalPrefix(self, baseName, interest, data, kind):
        """Handle prefix data: notify subscribers on change and reschedule."""
        try:
            name = Name(str(data.content).strip(' \t\n\r'))
        except Exception:
            # FIX: the original swallowed the parse error and then read
            # `name`, raising NameError. Fall back to the current prefix so
            # the change-check below is a no-op and polling continues.
            name = self._currentPrefix
        if (name != self._currentPrefix):
            for subscriber in self._subscribers.values():
                subscriber(self._currentPrefix, name)
            self._currentPrefix = name
        if not self._needStopDiscovery:
            nextDiscovery = datetime.datetime.now() + datetime.timedelta(
                seconds=self._periodicity)
            self._scheduler.add_date_job(self._requestLocalPrefix,
                                         nextDiscovery)
        return Closure.RESULT_OK

    def _onTimeout(self, baseName, interest):
        """Re-express the interest up to 3 times, then treat the prefix as empty."""
        if self._timeouts < 3:
            self._timeouts = self._timeouts + 1
            return Closure.RESULT_REEXPRESS
        else:
            # After repeated timeouts, report an empty prefix if that is a change.
            name = Name()
            if (name != self._currentPrefix):
                for subscriber in self._subscribers.values():
                    subscriber(self._currentPrefix, name)
                self._currentPrefix = name
            if not self._needStopDiscovery:
                nextDiscovery = datetime.datetime.now() + datetime.timedelta(
                    seconds=self._periodicity)
                self._scheduler.add_date_job(self._requestLocalPrefix,
                                             nextDiscovery)
            return Closure.RESULT_OK
def readInterface(cleanGenerated):
    """Load the Scintilla interface definition.

    When *cleanGenerated* is true, skip reading the .iface file and return
    an empty Face so callers only clean generated output.
    """
    iface = Face.Face()
    if cleanGenerated:
        return iface
    iface.ReadFromFile("../../include/Scintilla.iface")
    return iface
def readIn(self):
    """Parse the OBJ-style model file at self.filename.

    Populates self.vertices, self.vertexNormals and self.faces, and updates
    the axis-aligned bounding box (self.minx/maxx, miny/maxy, minz/maxz).

    Raises:
        Exception: if a face line is not in v/vt/vn index format.
    """
    with open(self.filename) as f:
        content = [x.strip("\n") for x in f.readlines()]
    for c in content:
        if len(c) > 0 and c[0] == '#':
            # It's a comment, ignore it
            continue
        line = c.split(' ')
        if len(line) < 2:
            # TODO: maybe throw an error?
            continue
        if line[0] == "v":
            # Vertex
            x = float(line[1])
            y = float(line[2])
            z = float(line[3])
            self.vertices.append(Vertex(x, y, z))
            # Grow the bounding box to include this vertex.
            self.minx = min(self.minx, x)
            self.maxx = max(self.maxx, x)
            self.miny = min(self.miny, y)
            self.maxy = max(self.maxy, y)
            self.minz = min(self.minz, z)
            self.maxz = max(self.maxz, z)
        elif line[0] == "vn":
            # Vector normal
            if line[1] == "nan" or line[2] == "nan" or line[3] == "nan":
                # Degenerate normal: substitute a zero vector.
                self.vertexNormals.append(VertexNormal(0, 0, 0))
            else:
                # FIX: this parse is now in an `else` branch — previously a
                # "nan" line appended BOTH the zero normal and the parsed
                # one, desynchronizing normal indices used by faces.
                self.vertexNormals.append(
                    VertexNormal(float(line[1]), float(line[2]),
                                 float(line[3])))
        elif line[0] == "f":
            # Face: each element is "vertex/texture/normal" indices.
            x = line[1].split('/')
            y = line[2].split('/')
            z = line[3].split('/')
            if len(x) < 3 or len(y) < 3 or len(z) < 3:
                raise Exception("Invalid Face Format")
            # OBJ indices are 1-based; store them 0-based.
            self.faces.append(
                Face(
                    int(x[0]) - 1, int(x[2]) - 1, int(y[0]) - 1,
                    int(y[2]) - 1, int(z[0]) - 1, int(z[2]) - 1))
def RegenerateAll(root, _showMaxID):
    """Regenerate SciLexer.h from the lexical-styles interface under *root*."""
    iface = Face.Face()
    iface.ReadFromFile(root / "include/LexicalStyles.iface")
    FileGenerator.Regenerate(root / "include/SciLexer.h", "/* ",
                             printLexHFile(iface))
def addnew():
    """Flask view: enroll a face image POSTed as an octet stream.

    Expects a POST with Content-Type application/octet-stream, a non-empty
    'Name' header, and the raw JPEG bytes in the body. The image is first
    recognized; depending on whether the person is unknown, already known
    under the same name, or known under a different name, Face.add_new is
    called with the appropriate name/human_id arguments.

    Returns:
        JSON status on success, an error dict on failure, or a redirect to
        the static add page for non-POST requests.
    """
    if not (request.method == 'POST'
            and request.headers['Content-Type'] == 'application/octet-stream'
            and request.headers['Name'] != ''):
        return redirect(url_for('static', filename='add.html'))
    try:
        # Persist the uploaded bytes, then run recognition on them.
        # FIX: file handles are now context-managed so they are closed
        # even when an exception is raised mid-branch.
        with open('./images/image.jpg', 'wb') as image:
            image.write(request.data)
        with open('./images/image.jpg', 'rb') as image:
            response = Face.who_is_it(image)
        if isinstance(response, dict) and 0 in response:
            requested_name = request.headers['Name']
            known_name = response[0]['Name']
            # Choose enrollment arguments: unknown person -> new name;
            # same person -> attach to existing id; name mismatch -> both.
            if known_name == 'Unknown':
                add_kwargs = {'name': requested_name}
            elif known_name == requested_name:
                add_kwargs = {'human_id': response[0]['Person ID']}
            else:
                add_kwargs = {
                    'name': requested_name,
                    'human_id': response[0]['Person ID']
                }
            # Build a filesystem-safe archive name from date/time/name.
            name = response[0]['Date'] + '-' + response[0][
                'Time'] + '-' + requested_name
            name = name.replace("/", "")
            name = name.replace(":", "")
            os.rename('./images/image.jpg', './images/' + name + '.jpg')
            time.sleep(1)
            with open('./images/' + name + '.jpg', 'rb') as image:
                result = {0: {'Status': Face.add_new(image, **add_kwargs)}}
            print(result)
            return jsonify(result)
        return {0: 'Error'}
    except FileNotFoundError as error:
        print(error)
        return {0: 'FileNotFoundError'}
    except FileExistsError as error:
        print(error)
        return {0: 'FileExistsError'}
    except PermissionError as error:
        print(error)
        return {0: 'PermissionError'}
def main():
    """Regenerate the plugin's ScintillaEditor header from Scintilla.iface."""
    iface = Face.Face()
    iface.ReadFromFile("Scintilla.iface")
    Regenerate("../{{cookiecutter.plugin_slug}}/src/ScintillaEditor.h",
               "/* ", printLexGatewayFile(iface))
def RegenerateAll():
    """Regenerate the interface-table source from Scintilla.iface."""
    iface = Face.Face()
    iface.ReadFromFile("Scintilla.iface")
    #idsInOrder = idsFromDocumentation(srcRoot + "/scintilla/doc/ScintillaDoc.html")
    Regenerate("../src/SciIFaceTable.cpp", "//",
               printIFaceTableCppFile([iface, []]))