def main(invideofilename, facefilename, outvideofilename):
    faces = Faces("")
    faces.__setstate__(common.json.loadfile(facefilename))
    dir = tempfile.mkdtemp()
    try:
        for i, f, totframes in common.video.frames(invideofilename, maxframes=len(faces.frames)):
            outf = os.path.join(dir, "out%05d.jpg" % i)
            print >> sys.stderr, "Processing %s to %s, image %s" % (f, outf, common.str.percent(i + 1, totframes))
            print >> sys.stderr, stats()
            draw_faces(faces.frames[i], f, outf)
        # I learned this command from here: http://electron.mit.edu/~gsteele/ffmpeg/
        cmd = "ffmpeg -y -r 30 -b 10000k -i %s %s" % (os.path.join(dir, 'out%05d.jpg'), outvideofilename)
        print >> sys.stderr, "Stitching video together as %s" % outvideofilename
        print >> sys.stderr, cmd
        # import time
        # time.sleep(30)
        common.misc.runcmd(cmd)
        print >> sys.stderr, stats()
    finally:
        print >> sys.stderr, "Removing dir %s" % dir
        shutil.rmtree(dir)
def main(facefilename):
    faces = Faces("")
    faces.__setstate__(common.json.loadfile(facefilename))
    showedcnt = 0

    # Construct one chain per face
    chains = []
    for i, frame in enumerate(faces.frames):
        for face in frame:
            assert face.is_face()
            chain = [(i, face)]
            chains.append(FaceChain(chain))
    chains.sort()

    facechains = FaceChains()
    facechains.copy_from_faces(faces)
    facechains.chains = chains
    facechains.join_nearby(1)
    facechains.deleteshortchains()
    facechains.gaussiansmoothchains()
    facechains.gaussiansmoothchains()
    print common.json.dumps(facechains.__getstate__())
def main(videofilename):
    faces = Faces(videofilename)
    for i, f, totframes in common.video.frames(videofilename):
    # for i, f, totframes in common.video.frames(videofilename, maxframes=1000):
        print >> sys.stderr, "Processing %s, image %s" % (f, common.str.percent(i + 1, totframes))
        print >> sys.stderr, stats()
        image = cvLoadImage(f)
        faces.set_dimensions(image.width, image.height)
        faces.add_frame(i, detect_faces(image))
        if i % 100 == 0 and i != 0:
            print >> sys.stderr, common.json.dumps(faces.__getstate__())
    print common.json.dumps(faces.__getstate__())
def main(to_reload, student_name, image_size, latent_space):
    IMG_DIM = image_size
    ARCHITECTURE = [IMG_DIM**2,    # 784 pixels
                    500, 500,      # intermediate encoding
                    latent_space]  # latent space dims
    # (and symmetrically back out again)

    numpy_dir = './vae_ready_numpy_arrays/' + student_name + '/' + student_name + '.npy'
    images_dir = './video_data/' + student_name + '/video_1/images/'

    if to_reload != 'None':  # restore
        meta_graph_dir = './out/' + student_name + '/' + to_reload
        v = vae_face.VAE(ARCHITECTURE, HYPERPARAMS, meta_graph=meta_graph_dir)
        print("Loaded!")
        if image_size != 0:
            makeVideo(v, student_name, images_dir, image_size, latent_space)
        else:
            print('Need image path to make video!')
    else:  # train
        faces = Faces(numpy_dir)
        LOG_DIR = './log/' + student_name + '/'
        METAGRAPH_DIR = './out/' + student_name + '/'
        v = vae_face.VAE(ARCHITECTURE, HYPERPARAMS, log_dir=LOG_DIR)
        v.train(faces, max_iter=MAX_ITER, max_epochs=MAX_EPOCHS, cross_validate=False,
                verbose=True, save=True, outdir=METAGRAPH_DIR)
        print("Trained!")
file_path = '/Users/ruslanpepa/PycharmProjects/testrin-pycharm/octahedron.txt'
VERTEX = 6      # number of vertices in the polyhedron
EDGES = 12      # number of edges in the polyhedron
FACES = 8       # number of faces in the polyhedron
TIMES = 10000   # number of time steps

list_faces = []  # list that will hold every face
with open(file_path) as fl_wth_fs:  # load all the vertex numbers from the file
    lines = fl_wth_fs.readlines()
    for line in lines:  # put the vertex numbers of every face into the list
        ns_vx = line.rstrip('\n').split('\t')  # keep only the numbers from each line
        a = int(ns_vx[0])
        b = int(ns_vx[1])
        c = int(ns_vx[2])
        list_faces.append(Faces(a, b, c))

conformal_weights = np.zeros((VERTEX, TIMES), float)  # conformal weights at the vertices
gauss_curve = adjacency_matrix(list_faces, VERTEX)    # Gaussian curvature at the polyhedron's vertices
length_matrix = adjacency_matrix(list_faces, VERTEX)  # adjacency matrix of edge lengths
for i in range(0, VERTEX):
    conformal_weights[i, 0] = 1

# fill the edge-length matrix with random sets of numbers
# for i in range(0, length_matrix.count_nonzero()):
#     row, col = length_matrix.nonzero()  # the list of all the indices in the row which
#     length_matrix[row[i], col[i]] = random.randrange(1, 10, 1) + 0.1*random.randrange(0, 9, 1)
#
# print('hello')
caley_menger = None
while caley_menger != None:
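# The script above breaks off right after `caley_menger = None`, so the loop body is not
# shown. The following is a hedged sketch, not the original author's code: one standard
# quantity matching the variable name is the Cayley-Menger determinant of a triangular
# face, computed from the edge-length matrix. The helper name `cm_determinant_triangle`
# and the assumption that a `Faces` object exposes its vertices as `.a`, `.b`, `.c` are
# illustrative only.
def cm_determinant_triangle(length_matrix, face):
    """Cayley-Menger determinant of the triangle (face.a, face.b, face.c).

    For a triangle with side lengths d_ab, d_ac, d_bc this determinant equals
    -16 * area**2, so it is strictly negative for a non-degenerate face.
    """
    i, j, k = face.a, face.b, face.c
    d_ij = length_matrix[i, j] ** 2
    d_ik = length_matrix[i, k] ** 2
    d_jk = length_matrix[j, k] ** 2
    cm = np.array([[0.0, 1.0, 1.0, 1.0],
                   [1.0, 0.0, d_ij, d_ik],
                   [1.0, d_ij, 0.0, d_jk],
                   [1.0, d_ik, d_jk, 0.0]])
    return np.linalg.det(cm)

# e.g. for a unit equilateral triangle the determinant is -3 (= -16 * (sqrt(3)/4)**2).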
location = bvTable.getAutoUpdateValue('allianceLocation', 1)

# NOTE: NOTE: NOTE
#
# For now just create one image pipeline to share with each image processor.
# LATER we will modify this to allow for a dictionary (switch-like) interface
# to be injected into the image processors; this will allow the image processors
# to have a selector for exclusive processing of different pipelines.
#
# I.e., the idea is to create separate image processors when concurrent pipelines
# are desired (e.g., look for faces AND pink elephants at the same time), and place
# the exclusive options into a single processor (e.g., look for faces OR pink elephants).
nada = Nada()
cubes = Cubes()
faces = Faces()
balls = FindBalls()

# NOTE: NOTE: NOTE:
#
# YOUR MILEAGE WILL VARY
# The exposure values are camera/driver dependent and have no well-defined standard (i.e., non-portable).
# Our implementation is forced to use v4l2-ctl (Linux) to make the exposure control work because our OpenCV
# port does not seem to play well with the exposure settings (produces either no answer or causes errors depending
# on the camera used).
FRONT_CAM_GEAR_EXPOSURE = 0
FRONT_CAM_NORMAL_EXPOSURE = -1  # Camera default

frontCam = BucketCapture(name="FrontCam", src=0, width=320, height=240, exposure=FRONT_CAM_GEAR_EXPOSURE).start()  # start low for gears
#backCam = BucketCapture(name="BackCam", src=1, width=320, height=240, exposure=FRONT_CAM_GEAR_EXPOSURE).start()  # start low for gears
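# The NOTE above says exposure has to be driven through v4l2-ctl rather than OpenCV. A
# minimal sketch of such a call is shown below; it is an illustration, not the project's
# actual helper. The device path and the control names (`exposure_auto`,
# `exposure_absolute`) vary by camera and kernel version, so treat them as assumptions.
import subprocess

def set_exposure(device, exposure):
    if exposure < 0:
        # Hand control back to the driver (auto exposure).
        subprocess.call(["v4l2-ctl", "-d", device, "--set-ctrl=exposure_auto=3"])
    else:
        # Switch to manual mode, then set an absolute exposure value.
        subprocess.call(["v4l2-ctl", "-d", device, "--set-ctrl=exposure_auto=1"])
        subprocess.call(["v4l2-ctl", "-d", device,
                         "--set-ctrl=exposure_absolute=%d" % exposure])

# e.g. set_exposure("/dev/video0", FRONT_CAM_GEAR_EXPOSURE)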
from m5stack import *
from faces import Faces

keyboard = Faces()

# read once
print("Key value:", end='')
print(keyboard.read())

# callback
def keyboard_cb(value):
    print("Key value:", end='')
    print(value)
    lcd.print(value)

keyboard.callback(keyboard_cb)
def __init__(self, sequence):
    self.sequence = sequence  # The sequence of the colors
    self.faces = Faces()      # Data structure to model the color constraint
    self.partial = dict()     # Contains the partial solutions
    self.solutions = 0
    self.steps = 0
def __init__(self):
    self.running = False
    faces = Faces()
    self.known_face_encodings = faces.getKnownFaceEncodings()
    self.known_face_names = faces.getKnownFaceNames()
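# The constructor above only loads the known encodings and names. Assuming they come from
# the `face_recognition` library (a guess suggested by the attribute names, not stated in
# the source), a typical matching step for one RGB frame might look like the hypothetical
# method sketched below.
import face_recognition
import numpy as np

def identify(self, frame):
    """Return a name (or "Unknown") for every face found in `frame` (an RGB image)."""
    locations = face_recognition.face_locations(frame)
    encodings = face_recognition.face_encodings(frame, locations)
    names = []
    for encoding in encodings:
        distances = face_recognition.face_distance(self.known_face_encodings, encoding)
        if len(distances) and distances.min() < 0.6:  # 0.6 is the library's usual threshold
            names.append(self.known_face_names[int(np.argmin(distances))])
        else:
            names.append("Unknown")
    return names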
# port does not seem to play well with the exposure settings (produces either no answer or causes errors depending
# on the cameras used)
FRONT_CAM_GEAR_EXPOSURE = 0
FRONT_CAM_NORMAL_EXPOSURE = -1  # Camera default

frontCam = Camera(name="FrontCam", src=0, width=320, height=240, exposure=FRONT_CAM_GEAR_EXPOSURE).start()

while not frontCam.isRunning():
    time.sleep(0.001)

print("Cameras are online!")

# OpenCV pipelines for Front Processor
frontPipes = {'faces'      : Faces('Faces'),
              'redBoiler'  : Nada('RedBoiler'),
              'blueBoiler' : Nada('BlueBoiler'),
              'gearLift'   : GearLift('GearLift', bvTable)}

frontProcessor = Processor("frontProcessor", frontCam, frontPipes['faces']).start()

# This is just an example of a 2nd Processor
# Note that it's OK to use the same Camera (frontCam in this case) to feed multiple Processors
frontProc2 = Processor("frontProc2", frontCam, frontPipes['gearLift']).start()

while not frontProcessor.isRunning():
    time.sleep(0.001)
while not frontProc2.isRunning():
    time.sleep(0.001)
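# The earlier NOTE describes a "dictionary (switch-like) interface" for choosing which
# pipeline a Processor runs. Using only the objects defined above, selection reduces to a
# dictionary lookup; constructing a Processor from the chosen key, as sketched here, is an
# illustration rather than the project's implemented selector.
def start_front_processor(pipeline_name):
    # Exclusive options live in one dict; concurrent ones get their own Processor.
    pipe = frontPipes[pipeline_name]
    proc = Processor("frontProcessor", frontCam, pipe).start()
    while not proc.isRunning():
        time.sleep(0.001)
    return proc

# e.g. switch the front camera from face detection to the gear-lift pipeline:
# frontProcessor = start_front_processor('gearLift')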