Example #1
# Python 2 snippet; the stdlib imports below are what it needs, while common,
# Faces, stats, and draw_faces come from the surrounding project.
import os
import shutil
import sys
import tempfile


def main(invideofilename, facefilename, outvideofilename):
    faces = Faces("")
    faces.__setstate__(common.json.loadfile(facefilename))

    tmpdir = tempfile.mkdtemp()  # `tmpdir` avoids shadowing the `dir` builtin
    try:
        for i, f, totframes in common.video.frames(invideofilename, maxframes=len(faces.frames)):
            outf = os.path.join(tmpdir, "out%05d.jpg" % i)
            print >> sys.stderr, "Processing %s to %s, image %s" % (f, outf, common.str.percent(i+1, totframes))
            print >> sys.stderr, stats()

            draw_faces(faces.frames[i], f, outf)

        # I learned this command from here: http://electron.mit.edu/~gsteele/ffmpeg/
        cmd = "ffmpeg -y -r 30 -b 10000k -i %s %s" % (os.path.join(tmpdir, 'out%05d.jpg'), outvideofilename)
        print >> sys.stderr, "Stitching video together as %s" % outvideofilename
        print >> sys.stderr, cmd
        common.misc.runcmd(cmd)
        print >> sys.stderr, stats()

    finally:
        print >> sys.stderr, "Removing dir %s" % tmpdir
        shutil.rmtree(tmpdir)
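
On current ffmpeg builds the bare -b flag above is ambiguous; a hypothetical modern equivalent of the same stitch step (same frame pattern and bitrate, everything else unchanged) might be:

# Sketch only: -framerate sets the input frame rate, -b:v the output video bitrate.
cmd = "ffmpeg -y -framerate 30 -i %s -b:v 10000k %s" % (
    os.path.join(tmpdir, 'out%05d.jpg'), outvideofilename)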
Example #2
def main(facefilename):
    faces = Faces("")
    faces.__setstate__(common.json.loadfile(facefilename))

    showedcnt = 0

    # Construct one chain per face
    chains = []
    for i, frame in enumerate(faces.frames):
        for face in frame:
            assert face.is_face()
            chain = [(i, face)]
            chains.append(FaceChain(chain))
    chains.sort()

    facechains = FaceChains()
    facechains.copy_from_faces(faces)
    facechains.chains = chains

    facechains.join_nearby(1)          # presumably merges chains separated by at most 1 frame
    facechains.deleteshortchains()     # drop short-lived (likely spurious) chains
    facechains.gaussiansmoothchains()  # smooth the chain trajectories
    facechains.gaussiansmoothchains()  # (applied twice for a stronger smoothing)

    print common.json.dumps(facechains.__getstate__())
Example #3
from abc import ABC, abstractmethod  # import needed by this snippet


class Solver(ABC):
    def __init__(self, sequence):
        self.sequence = sequence  # The sequence of the colors
        self.faces = Faces()  # Datastructure to model the color constraint
        self.partial = dict()  # Contains the partial solutions
        self.solutions = 0
        self.steps = 0

    @property
    @abstractmethod
    def starting_points(self):
        pass

    def solve(self):
        """ Start solving from every start point """
        for coord in self.starting_points:
            self._solve(coord, 0)
        return self.solutions, self.steps

    def _solve(self, coordinate, depth):
        """ Recursive step """
        # Condition for termination: The cube is completed
        if depth == len(self.sequence) - 1:
            self.solution()
            return True
        self.steps += 1
        color = self.sequence[depth]
        if self.constraints(coordinate, color):
            self.push(coordinate, color, depth)
            # Recursively solve for every neighbor
            for neighbor in neighbors(coordinate):
                if neighbor not in self.partial:  # Only use free spaces
                    self._solve(neighbor, depth + 1)
            self.pop(coordinate, color)

    def push(self, coordinate, color, depth):
        """ Step in: Go forward in the sequence and add the step to partial solution """
        self.partial[coordinate] = (color, depth)
        self.faces.push(coordinate, color)

    def pop(self, coordinate, color):
        """ Step out: Go backwards in the sequence and remove the step from the partial solution """
        self.faces.pop(coordinate, color)
        del self.partial[coordinate]

    def solution(self):
        """ Is called when a solution is found """
        self.solutions += 1
        human(self.partial)

    @abstractmethod
    def constraints(self, coordinate, color):
        """ Run checks to limit the recursion """
        return True
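
For illustration, a concrete subclass might look like the sketch below; the single starting point, the tuple coordinate scheme, and the no-op constraint are assumptions for this sketch, not part of the project above.

class ExampleSolver(Solver):
    """Hypothetical subclass: one start point, no pruning."""
    @property
    def starting_points(self):
        return [(0, 0, 0)]  # assumed coordinate scheme

    def constraints(self, coordinate, color):
        return True  # a real subclass would prune the search here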
Example #4
    def add_face(self, image_path):
        faces = Faces()
        faces.from_file(image_path)
        image = Image.open(image_path)

        if not faces.data:
            raise MissingFaceException("image has no faces")
        
        face = self.find_key_face(faces, image)

        if not face:
            raise NoRectangleException("can't find valid rectangle for faces in image " + image_path)

        self.faces.append({"face": face, "image": image})
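
A hypothetical caller for add_face; the collage object, the image path, and the assumption that both exception classes are importable are illustrative, not taken from this source.

try:
    collage.add_face("people/alice.jpg")  # hypothetical object and path
except MissingFaceException:
    pass  # the image contained no faces
except NoRectangleException as exc:
    print(exc)  # no usable rectangle among the detected faces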
Example #5
def main(videofilename):
    faces = Faces(videofilename)
    for i, f, totframes in common.video.frames(videofilename):
        # for i, f, totframes in common.video.frames(videofilename, maxframes=1000):
        print >> sys.stderr, "Processing %s, image %s" % (
            f, common.str.percent(i + 1, totframes))
        print >> sys.stderr, stats()
        image = cvLoadImage(f)
        faces.set_dimensions(image.width, image.height)
        faces.add_frame(i, detect_faces(image))

        if i % 100 == 0 and i != 0:
            print >> sys.stderr, common.json.dumps(faces.__getstate__())
    print common.json.dumps(faces.__getstate__())
Example #6
    def get_frame(self):
        # read the frame, convert it from BGR to RGB, and shrink the width to
        # speed up processing
        ret, frame = self.video.read()
        frame = imutils.resize(frame, width=500)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # run face detection
        rects = faceClass.detectMultiScale(gray,
                                           scaleFactor=1.1,
                                           minNeighbors=5,
                                           minSize=(30, 30))

        # reorder each face box to TOP, RIGHT, BOTTOM, LEFT
        rostos = [(y, x + w, y + h, x) for (x, y, w, h) in rects]

        # encode the detected faces
        encodings = face_recognition.face_encodings(rgb, rostos)
        names = []

        for encoding in encodings:
            # try to match the input against the known encodings
            matches = face_recognition.compare_faces(data["encodings"],
                                                     encoding)
            name = "Unknown"

            if True in matches:
                # collect the indexes that matched in the comparison
                matchedIds = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                print('[INFO] matches in frame:' + str(len(matchedIds)))
                if len(matchedIds) > 50:
                    # loop over the matched indexes and keep a vote count for
                    # each recognized face
                    for i in matchedIds:
                        name = data["nomes"][i]
                        counts[name] = counts.get(name, 0) + 1
                    # determine the recognized face with the largest number of
                    # votes (note: in the event of an unlikely tie Python will
                    # select the first entry in the dictionary)
                    name = max(counts, key=counts.get)

                    # update the list of names
                    names.append(name)
                    # draw the box with the recognized name around the face
                    faces.showFaces(frame, rostos, name, names)
                else:
                    # draw the box labeled Unknown around the face
                    print('[INFO] matches in frame:' + str(len(matchedIds)))
                    faces.showFaces(frame, rostos, name, None)
            else:
                # draw the box labeled Unknown around the face
                print('[INFO] no matches found')
                faces.showFaces(frame, rostos, name, None)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
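
A get_frame that returns JPEG bytes is the usual building block for an MJPEG stream; a hypothetical generator around it (not part of this source) might look like:

def gen(camera):
    # standard multipart framing for MJPEG-over-HTTP
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')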
Example #7
def main(to_reload, student_name, image_size, latent_space):

    IMG_DIM = image_size

    ARCHITECTURE = [
        IMG_DIM**2,  # input pixels (784 when image_size is 28)
        500,
        500,  # intermediate encoding
        latent_space  # latent space dims
    ]
    # (and symmetrically back out again)

    numpy_dir = './vae_ready_numpy_arrays/' + student_name + '/' + student_name + '.npy'
    images_dir = './video_data/' + student_name + '/video_1/images/'

    if to_reload != 'None':  # restore
        meta_graph_dir = './out/' + student_name + '/' + to_reload
        v = vae_face.VAE(ARCHITECTURE, HYPERPARAMS, meta_graph=meta_graph_dir)
        print("Loaded!")
        if image_size != 0:
            makeVideo(v, student_name, images_dir, image_size, latent_space)
        else:
            print('Need image path to make video!')

    else:  # train
        faces = Faces(numpy_dir)
        LOG_DIR = './log/' + student_name + '/'
        METAGRAPH_DIR = './out/' + student_name + '/'
        v = vae_face.VAE(ARCHITECTURE, HYPERPARAMS, log_dir=LOG_DIR)
        v.train(faces,
                max_iter=MAX_ITER,
                max_epochs=MAX_EPOCHS,
                cross_validate=False,
                verbose=True,
                save=True,
                outdir=METAGRAPH_DIR)
        print("Trained!")
Example #8
# NOTE: exposure values are camera/driver dependent; v4l2-ctl (Linux) is used
# because the OpenCV port does not seem to play well with the exposure settings
# (produces either no answer or causes errors depending on the cameras used)
FRONT_CAM_GEAR_EXPOSURE = 0
FRONT_CAM_NORMAL_EXPOSURE = -1   # Camera default

frontCam = Camera(name="FrontCam", src=0, width=320, height=240, 
                  exposure=FRONT_CAM_GEAR_EXPOSURE).start()

while not frontCam.isRunning():
    time.sleep(0.001)

print("Cameras are online!")


# OpenCV pipelines for Front Processor
frontPipes = {'faces'       : Faces('Faces'),
              'redBoiler'   : Nada('RedBoiler'),
              'blueBoiler'  : Nada('BlueBoiler'),
              'gearLift'    : GearLift('GearLift', bvTable)
              }

frontProcessor = Processor("frontProcessor", frontCam, frontPipes['faces']).start()
# This is just an example of a 2nd Processor
# Note that it's OK to use the same Camera (frontCam in this case) to feed multiple Processors
frontProc2 = Processor("frontProc2", frontCam, frontPipes['gearLift']).start()

while not frontProcessor.isRunning():
    time.sleep(0.001)
while not frontProc2.isRunning():
    time.sleep(0.001)
    
Example #9
from m5stack import *
from faces import Faces

keyboard = Faces()

# read once
print("Key value:", end='')
print(keyboard.read())


# callback
def keyboard_cb(value):
    print("Key value:", end='')
    print(value)
    lcd.print(value)


keyboard.callback(keyboard_cb)
Example #10
class Interface:

    def __init__(self):
        self.faces = Faces()
        self.faces.ler_dados(open(ARQUIVO))
        self.zoom = 80
        self.aspecto = 0
        self.angulo = 0

    def init(self):
        glClearColor(1, 1, 1, 1)

    # Function used to set up the viewing volume
    def EspecificaParametrosVisualizacao(self):

        glEnable(GL_CULL_FACE)  # enable removal of back faces

        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()

        gluPerspective(self.zoom, self.aspecto, 0.5, 500)

        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

        gluLookAt(0, 0, 50,
                  0, 0, 0,
                  0, 1, 0)

    def gerencia_mouse(self, button, state, x, y):
        if button == GLUT_LEFT_BUTTON:
            if state == GLUT_DOWN:  # Zoom in
                if self.zoom >= 10:
                    self.zoom -= 5

        if button == GLUT_RIGHT_BUTTON:
            if state == GLUT_DOWN:  # Zoom out
                if self.zoom <= 130:
                    self.zoom += 5

        self.EspecificaParametrosVisualizacao()
        glutPostRedisplay()

    def gerencia_teclado(self, tecla, x, y):
        if tecla == GLUT_KEY_LEFT:  # left arrow
            self.angulo += 2
        if tecla == GLUT_KEY_RIGHT:  # right arrow
            self.angulo -= 2

        self.EspecificaParametrosVisualizacao()
        glutPostRedisplay()

    def display(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glColor3f(0.0, 0.0, 1.0)

        glRotatef(self.angulo, 0, 1, 1)
        glCullFace(GL_FRONT)  # cull front faces
        self.faces.desenha()

        glutSwapBuffers()

    def reshape(self, largura, altura):
        if altura == 0:
            altura = 1
        # Set the viewport size
        glViewport(0, 0, largura, altura)
        # Compute the aspect correction; float() keeps Python 2 integer
        # division from truncating the ratio
        self.aspecto = float(largura) / altura

        self.EspecificaParametrosVisualizacao()

    def main(self):
        glutInit(argv)

        # GLUT_DOUBLE is needed because display() calls glutSwapBuffers,
        # which requires a double-buffered window
        glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
        glutInitWindowSize(600, 600)
        glutCreateWindow("Faces 3D")
        glutDisplayFunc(self.display)
        glutReshapeFunc(self.reshape)
        glutSpecialFunc(self.gerencia_teclado)
        glutMouseFunc(self.gerencia_mouse)
        self.init()
        glutMainLoop()
Example #11
location = bvTable.getAutoUpdateValue('allianceLocation', 1)

# NOTE: NOTE: NOTE
#
# For now just create one image pipeline to share with each image processor.
# LATER we will modify this to allow for a dictionary (switch-like) interface
# to be injected into the image processors; this will give each image processor
# a selector for exclusive processing of different pipelines.
#
# I.e., the idea is to create separate image processors when concurrent pipelines
# are desired (e.g., look for faces AND pink elephants at the same time), and place
# the mutually exclusive options into a single processor (e.g., look for faces OR pink elephants).

nada = Nada()
cubes = Cubes()
faces = Faces()
balls = FindBalls()
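
# A rough sketch of the dictionary (switch-like) selector described in the NOTE
# above. SelectablePipeline and its process() contract are assumptions made for
# illustration, not this project's actual Processor API.
class SelectablePipeline:
    def __init__(self, pipelines, initial):
        self.pipelines = pipelines  # e.g. {'nada': nada, 'faces': faces}
        self.selected = initial

    def select(self, key):
        # switch which pipeline handles subsequent frames (the exclusive options)
        self.selected = key

    def process(self, frame):
        return self.pipelines[self.selected].process(frame)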

# NOTE: NOTE: NOTE:
#
# YOUR MILEAGE WILL VARY
# The exposure values are camera/driver dependent and have no well-defined
# standard (i.e., they are non-portable). Our implementation is forced to use
# v4l2-ctl (Linux) to make exposure control work, because our OpenCV port does
# not seem to play well with the exposure settings (it produces either no
# answer or causes errors, depending on the camera used).
FRONT_CAM_GEAR_EXPOSURE = 0
FRONT_CAM_NORMAL_EXPOSURE = -1   # Camera default

frontCam = BucketCapture(name="FrontCam", src=0, width=320, height=240,
                         exposure=FRONT_CAM_GEAR_EXPOSURE).start()  # start low for gears
# backCam = BucketCapture(name="BackCam", src=1, width=320, height=240,
#                         exposure=FRONT_CAM_GEAR_EXPOSURE).start()  # start low for gears
Example #12
    def __init__(self):
        self.running = False
        faces = Faces()
        self.known_face_encodings = faces.getKnownFaceEncodings()
        self.known_face_names = faces.getKnownFaceNames()
Example #13
import numpy as np  # import needed by this snippet

file_path = '/Users/ruslanpepa/PycharmProjects/testrin-pycharm/octahedron.txt'
VERTEX = 6  # number of vertices in the polyhedron
EDGES = 12  # number of edges in the polyhedron
FACES = 8  # number of faces in the polyhedron
TIMES = 10000  # number of time steps
list_faces = []  # list that will hold all the faces
with open(file_path) as fl_wth_fs:  # read all the vertex numbers from the file
    lines = fl_wth_fs.readlines()
for line in lines:  # put every vertex triple into the list of faces
    ns_vx = line.rstrip('\n').split('\t')  # keep only the numbers from each line
    a = int(ns_vx[0])
    b = int(ns_vx[1])
    c = int(ns_vx[2])
    list_faces.append(Faces(a, b, c))
conformal_weights = np.zeros((VERTEX, TIMES), float)  # conformal weights at the vertices
gauss_curve = adjacency_matrix(list_faces, VERTEX)  # Gaussian curvature at the polyhedron's vertices
length_matrix = adjacency_matrix(list_faces, VERTEX)  # adjacency matrix of edge lengths
for i in range(0, VERTEX):
    conformal_weights[i, 0] = 1
# fill the edge-length matrix with random sets of numbers
# for i in range(0, length_matrix.count_nonzero()):
#     row, col = length_matrix.nonzero()  # all the indexes in the row
#     length_matrix[row[i], col[i]] = random.randrange(1, 10, 1) + 0.1*random.randrange(0, 9, 1)
# # print('hello')
caley_menger = None
while caley_menger != None:
Example #14
        cv2.namedWindow("input")
        while(True):
            f, img = capture.read()
            faces=self.mfaces.get_faces_from_image(img)
            for face in faces:
                face=self.mfaces.normalize(face)
                prediction=self.mfaces.predict_actor_from_face_image(face)
                print prediction
                if int(prediction[0])==int(actor_index):
                    cv2.imshow("input",face)
                    cv2.waitKey(1)
            #img=self.mfaces.normalize(img)
            #prediction=self.mfaces.predict_actors_from_image(img)
            #print prediction
            #cv2.imshow("input", img)
            #cv2.waitKey(1)

    def __init__(self, faces, data_path="/tmp/videos/"):
        self.mdata_path = data_path
        self.mfaces = faces
        self.mfaces.index_actors()
        if not os.path.exists(self.mdata_path):
            os.makedirs(self.mdata_path)


if __name__ == '__main__':
    mfaces = Faces()
    mfaces.train()
    mvideo = Video(mfaces)
    # mvideo.get_actor_from_video("/home/volcan/Desktop/development/FlyingCircus/videos/JohnCleese/John_Cleese_Carefully_Considers_Your_Futile_Comments.avi")
    mvideo.get_actor_from_video("/home/volcan/Desktop/development/FlyingCircus/videos/JohnCleese/The_Brain.flv",
                                "nm0000092")