Example 1
    def initialize(self, base_info, diff_data, game_setting):
        self.base_info = base_info
        self.game_setting = game_setting

        # Seed the RNG, offset by this agent's index so agents differ
        random.seed(random.random() + self.base_info["agentIdx"])

        self.myrole = base_info["myRole"]
        self.result_seer = []
        self.result_med = []
        self.player_size = len(self.base_info["remainTalkMap"])
        self.talk_turn = 0
        self.honest = False
        self.divined_as_wolf = []
        self.divined_as_human = []
        self.wrong_divine = set()
        self.white = set()
        self.black = set()
        self.greys = set()
        self.check_alive()
        self.seers = set()
        self.tryingPP = set()
        self.greys = set(self.alive) - {int(base_info["agentIdx"])}
        self.players = self.greys.copy()
        self.whisper_turn = 0
        self.attack_success = True
        self.attacked_who_lastnight = 0
        self.gen.gameInitialize(self.base_info["agentIdx"], self.player_size)
        # self.q = []  # removed: no longer used
        self.seer_divined_me_as_werewolf = set()
        self.estimated_me_as_werewolf = set()
        self.estimated_me_as_human = set()
        self.asked_why_divine = set()
        self.asked_why_doubt = set()
        self.PPmode = False
        self.when_declare = random.randint(4, 8)
        self.emo = emotion.Emotion(self.alive)
        self.stealth = random.random() >= 0.75  # play stealthily 25% of the time
        self.who_said_black = {agent: [] for agent in self.alive}
        self.has_CO_seer = False
        self.emo.myrole_appearance = base_info["myRole"]
        self.emo.myrole = base_info["myRole"]
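
For context, a minimal sketch of the base_info payload this initialize() reads; the key names come from the snippet above, while the concrete values (and the five-player map) are illustrative assumptions.

# Hypothetical payload; key names from initialize(), values are assumptions
base_info = {
    "agentIdx": 1,                                       # this agent's index
    "myRole": "WEREWOLF",                                # role for this game
    "remainTalkMap": {str(i): 10 for i in range(1, 6)},  # one entry per player
}
print(len(base_info["remainTalkMap"]))  # player_size would be 5 here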
Example 2
    def __init__(self, canvas, media_module, media_file):
        # Initialize the SmartObject base first, so member_add() below works
        evas.SmartObject.__init__(self, canvas)
        self.vid = emotion.Emotion(canvas, module_filename=media_module)
        self.member_add(self.vid)
        self.vid.file = media_file
        self.vid.smooth_scale = True

        self.frame = edje.Edje(canvas,
                               file=theme_file,
                               size=(320, 240),
                               group="video_controller")
        self.member_add(self.frame)
        self.frame.part_swallow("video_swallow", self.vid)
        self.frame.data["moving"] = False
        self.frame.data["resizing"] = False
        self.frame.part_drag_value_set("video_speed", 0.0, 1.0)
        self.frame.part_text_set("video_speed_txt", "1.0")
        self.vid.show()

        self._setup_signals()
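
A hypothetical usage sketch, assuming the __init__ above belongs to an evas.SmartObject subclass named MovieWindow and the old python-efl bindings; the "gstreamer" engine name and the media file are placeholders.

import ecore.evas

ee = ecore.evas.SoftwareX11(w=320, h=240)
player = MovieWindow(ee.evas, media_module="gstreamer", media_file="movie.ogv")
player.show()
ee.show()
ecore.main_loop_begin()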
Example 3
# Imports implied by the snippet (align.detect_face and facenet come from the
# davidsandberg/facenet project; "emotion as em" is an assumed alias for the
# local emotion module):
import os
import sys
import pickle
import cv2
import tensorflow as tf
import align.detect_face
import facenet
import emotion as em


def main():
    # Detect Face factor
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.8]  # three steps' thresholds
    factor = 0.709  # scale factor

    print('Creating networks and loading parameters')

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    # Load emotion
    emotion = em.Emotion()
    emotion.load_weights("models/emotion/emotion.h5")

    # Camera
    capture = cv2.VideoCapture(0)

    if not capture.isOpened():
        print("Can't get image from webcam")
        sys.exit(1)

    frame_interval = 3
    count = 0

    while True:
        cam_status, frame = capture.read()
        if not cam_status:
            print("Can't read frame from webcam")
            break

        if count == 0:
            # Run detection (and redraw the window) every frame_interval-th frame
            img = frame
            bounding_boxes, _ = align.detect_face.detect_face(
                img, minsize, pnet, rnet, onet, threshold, factor)

            # if bounding_boxes.shape[0] <= 0:
            #     continue

            # Prediction and Draw rectangle
            for face_position in bounding_boxes:
                face_position = face_position.astype(int)

                # Predict emotion
                face = crop_and_resize(img,
                                       (face_position[0], face_position[1],
                                        face_position[2], face_position[3]))

                emotion_result = emotion.predict(face)
                emotion_max = max(emotion_result, key=emotion_result.get)
                face_emotion = "{} {:.5f}".format(emotion_max,
                                                  emotion_result[emotion_max])

                # Draw rectangle at the face position
                cv2.rectangle(frame, (face_position[0], face_position[1]),
                              (face_position[2], face_position[3]),
                              (0, 255, 0), 2)
                cv2.putText(frame, face_emotion,
                            (face_position[0], face_position[1] - 20),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 255, 0))

            cv2.imshow('Webcam', frame)

        count += 1
        if count >= frame_interval:
            count = 0

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=0.75, allow_growth=True)
    tf_config = tf.ConfigProto(
        gpu_options=gpu_options, log_device_placement=False)
    sess = tf.Session(config=tf_config)
    with sess.as_default():
        pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

        facenet.load_model(os.path.abspath("./models/facenet/20180402-114759/"))

        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
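
        # A hedged sketch of how these tensors are typically used: "faces"
        # would be a hypothetical batch of aligned crops, shape
        # (n, 160, 160, 3) (facenet's usual input size):
        # emb = sess.run(embeddings,
        #                feed_dict={images_placeholder: faces,
        #                           phase_train_placeholder: False})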

# Load emotion
emotion = em.Emotion()
emotion.load_weights("models/emotion/emotion.h5")

# Load Facenet
classifier_filename_exp = os.path.abspath("./models/facenet/20180402-114759/lfw_classifier.pkl")
with open(classifier_filename_exp, 'rb') as infile:
    (model, class_names) = pickle.load(infile)

def load_facenet_model():
    classifier_filename_exp = os.path.abspath("./models/facenet/20180402-114759/lfw_classifier.pkl")
    with open(classifier_filename_exp, 'rb') as infile:
        (model, class_names) = pickle.load(infile)
    return model, class_names

def crop_and_resize(image, position, size=(48, 48)):
    """Crop the (x1, y1, x2, y2) region from image and resize it.

    The 48x48 default is an assumed emotion-model input size."""
    posx1, posy1, posx2, posy2 = position
    return cv2.resize(image[posy1:posy2, posx1:posx2], size)
Example 5
def cozmo_manage_emotion(robot: cozmo.robot.Robot):
    cozmo_robot = emotion.Emotion(robot)
    robot_plot = pd.DataFrame(cozmo_robot.emotions, index=[0])
    print("Welcome to the emotional manager!")
    print("Please select a emotion.")
    results_view_loop(cozmo_robot, robot_plot)
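
A minimal launch sketch: cozmo.run_program is the SDK's standard entry point for a function that takes a robot argument.

import cozmo

cozmo.run_program(cozmo_manage_emotion)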