Example #1
    def __init__(self, model_dir, class_type):
        self.graph = tf.Graph()
        # config = tf.ConfigProto(allow_soft_placement=True)
        # self.sess = tf.Session(config=config, graph=self.graph)
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            # import saved model from loc into local graph
            label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
            nlabels = len(label_list)
            if class_type == "age":
                model_fn = select_model(FLAGS.age_model_type)
            else:
                model_fn = select_model(FLAGS.sex_model_type)

            self.images = tf.placeholder(tf.float32,
                                         [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, self.images, 1, False)
            # init = tf.global_variables_initializer()
            saver = tf.train.Saver()
            requested_step = FLAGS.requested_step if FLAGS.requested_step else None
            checkpoint_path = '%s' % (model_dir)
            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)
            saver.restore(self.sess, model_checkpoint_path)
            self.softmax_output = tf.nn.softmax(logits)
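Note: every example on this page calls a helper get_checkpoint(checkpoint_path, requested_step, checkpoint_basename) whose source is not shown. A minimal sketch of plausible behavior, assuming it wraps tf.train.get_checkpoint_state (the file-naming fallback below is illustrative, not the project's actual code):

def get_checkpoint(checkpoint_path, requested_step=None, basename='checkpoint'):
    # Sketch only: resolve a checkpoint file path and its global step.
    if requested_step is not None:
        # Point directly at a specific training step (assumed file layout).
        return '%s/%s-%s' % (checkpoint_path, basename, requested_step), requested_step
    ckpt = tf.train.get_checkpoint_state(checkpoint_path)
    if ckpt is None or not ckpt.model_checkpoint_path:
        raise ValueError('No checkpoint found in %s' % checkpoint_path)
    # By convention the step is the numeric suffix after the last '-'.
    global_step = int(ckpt.model_checkpoint_path.split('-')[-1])
    return ckpt.model_checkpoint_path, global_step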
Example #2
def eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step=None):
    """Run Eval once.
    Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
    """
    top1 = tf.nn.in_top_k(logits, labels, 1)
    top2 = tf.nn.in_top_k(logits, labels, 2)

    with tf.Session() as sess:
        checkpoint_path = '%s/run-%d' % (FLAGS.train_dir, FLAGS.run_id)

        model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)

        saver.restore(sess, model_checkpoint_path)

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                 start=True))
            num_steps = int(math.ceil(num_eval / FLAGS.batch_size))
            true_count1 = true_count2 = 0
            total_sample_count = num_steps * FLAGS.batch_size
            step = 0
            #print(FLAGS.batch_size, num_steps)

            while step < num_steps and not coord.should_stop():
                start_time = time.time()
                v, predictions1, predictions2 = sess.run([logits, top1, top2])
                duration = time.time() - start_time
                sec_per_batch = float(duration)
                examples_per_sec = FLAGS.batch_size / sec_per_batch

                true_count1 += np.sum(predictions1)
                true_count2 += np.sum(predictions2)
                format_str = ('%s (%.1f examples/sec; %.3f sec/batch)')
                #print(format_str % (datetime.now(),
                #                    examples_per_sec, sec_per_batch))

                step += 1

            # Compute precision @ 1.

            precision1 = true_count1 / total_sample_count
            precision2 = true_count2 / total_sample_count
            #print('%s: precision @ 1 = %.3f (%d/%d)' % (datetime.now(), precision1, true_count1, total_sample_count))
            #print('%s: precision @ 2 = %.3f (%d/%d)' % (datetime.now(), precision2, true_count2, total_sample_count))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Precision @ 1', simple_value=precision1)
            summary.value.add(tag='Precision @ 2', simple_value=precision2)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
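For reference, the precision bookkeeping above rests on tf.nn.in_top_k, which returns one boolean per example. A tiny self-contained illustration (toy logits, not from the original project):

import numpy as np
import tensorflow as tf

logits = tf.constant([[0.1, 0.7, 0.2],   # top-1 guess: class 1
                      [0.6, 0.1, 0.3]])  # top-1 guess: class 0
labels = tf.constant([1, 2])

top1 = tf.nn.in_top_k(logits, labels, 1)  # [True, False]
top2 = tf.nn.in_top_k(logits, labels, 2)  # [True, True]

with tf.Session() as sess:
    p1, p2 = sess.run([top1, top2])
    print(np.sum(p1) / 2.0, np.sum(p2) / 2.0)  # precision@1 = 0.5, precision@2 = 1.0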
Example #3
    def __init__(
            self,
            model_dir='/usr/src/app/deps/rude-carnie/inception_gender_checkpoint',
            model_type='inception',
            class_type='gender'):
        '''
        Just a wrapper around guess.py.
        '''
        self.model_dir = model_dir
        self.model_type = model_type
        self.class_type = class_type
        self.sess = tf.Session()
        model_fn = select_model(self.model_type)
        self.label_list = AGE_LIST if self.class_type == 'age' else GENDER_LIST
        nlabels = len(self.label_list)
        self.images = tf.placeholder(tf.string, [None])
        standardize = tf.map_fn(self.decode, self.images, dtype=tf.float32)
        logits = model_fn(nlabels, standardize, 1, False)
        init = tf.global_variables_initializer()

        requested_step = None  #FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (self.model_dir)
        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, None)  # None in place of FLAGS.checkpoint

        saver = tf.train.Saver()
        saver.restore(self.sess, model_checkpoint_path)

        self.softmax_output = tf.nn.softmax(logits)

        self.coder = ImageCoder()
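A possible way to drive this wrapper, assuming the decode method mapped over self.images turns an encoded JPEG string into a RESIZE_FINAL float image (the class name Guesser and the file path are illustrative):

guesser = Guesser(class_type='gender')
with open('face.jpg', 'rb') as f:
    raw = f.read()
probs = guesser.sess.run(guesser.softmax_output,
                         feed_dict={guesser.images: [raw]})
best = probs[0].argmax()
print(guesser.label_list[best], probs[0][best])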
Example #4
def guess_loop(class_type, model_type, model_dir, q, guess_q):
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        model_fn = select_model(model_type)

        with tf.device('/cpu:0'):
            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)

            init = tf.global_variables_initializer()

            checkpoint_path = '%s' % (model_dir)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, None, 'checkpoint')

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)


            while True:
                image_files = q.get()
                guess_result = []

                for image in image_files:
                    guess = classify_one_multi_crop(sess, label_list, softmax_output, images, image)
                    if guess is not None:
                        guess_result.append(guess)

                if not guess_q.full():
                    guess_q.put(guess_result)
Example #5
def guessGender(file_name):  # pylint: disable=unused-argument
    # Detect the gender of a single photo.
    with tf.Session() as sess:
        with tf.device(FLAGS.device_id):
            # Graph construction as in the sibling examples on this page
            # (the snippet used label_list, images and logits without defining them).
            label_list = GENDER_LIST
            nlabels = len(label_list)
            model_fn = select_model(FLAGS.model_type)

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()

            try:
                best_choice = classify(sess, label_list, softmax_output, coder, images, file_name)
                # print(best_choice)
                return best_choice

            except Exception as e:
                print(e)
                print('Failed to run image %s ' % file_name)
Example #6
    def __init__(self):
        #don't forget to reset the graph, or you'll get an error (olivier)
        #tf.reset_default_graph()
        self._sess_default = tf.Session()
        self.g_1 = tf.Graph()
        with self.g_1.as_default():

            files = []
            config = tf.ConfigProto(allow_soft_placement=True)
            self._sess = tf.Session(config=config)

            label_list = AGE_LIST
            nlabels = len(label_list)

            print('Executing on %s' % DEVICE_ID)
            with tf.device(DEVICE_ID):

                images = tf.placeholder(tf.float32,
                                        [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                logits, feature = inception_v3_test(nlabels, images, 1, False)
                init = tf.global_variables_initializer()

                #restore a specific "checkpoint" (which step) olivier
                requested_step = None

                checkpoint_path = MODEL_DIR

                model_checkpoint_path, global_step = get_checkpoint(
                    checkpoint_path, requested_step, CHECKPOINT)
                #[print(n.name) for n in tf.get_default_graph().as_graph_def().node]
                saver = tf.train.Saver()
                saver.restore(self._sess, model_checkpoint_path)

                softmax_output = tf.nn.softmax(logits)
                coder = ImageCoder()

                #image_files=[FILENAME,FILENAME,FILENAME]

                writer = None

                #check tensorboard (olivier)
                writerTB = tf.summary.FileWriter('log',
                                                 graph=tf.get_default_graph())
                self.label_list = label_list
                self.softmax_output = softmax_output
                self.coder = coder
                self.images = images
                self.writer = writer
                self.feature = feature

        self.g_2 = tf.Graph()
        with self.g_2.as_default():
            self._sessGender = tf.Session()
            self.prediction, self.input = genderClassifier()
            init = tf.global_variables_initializer()
            new_saver = tf.train.import_meta_graph(genderpath)
            new_saver.restore(self._sessGender,
                              tf.train.latest_checkpoint(genderckp))
Example #7
def guessGender(path):  # pylint: disable=unused-argument

    with tf.Session() as sess:

        # tf.reset_default_graph()
        label_list = GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()
            files = deal_file.get_files(path)
            font = cv2.FONT_HERSHEY_SIMPLEX

            try:
                for f in files:
                    best_choice = classify(sess, label_list, softmax_output,
                                           coder, images, f)
                    # print(best_choice)
                    pic = cv2.imread(f)
                    fname = f[:-4] + '_test' + f[-4:]
                    cv2.putText(pic, best_choice[0], (5, 40), font, 1,
                                (100, 255, 50), 2, cv2.LINE_AA)
                    cv2.imwrite(fname, pic)
                    print(best_choice)

            except Exception as e:
                print(e)
                print('Failed to run image %s ' % f)
Example #8
    def extract(self):
        with tf.Session(config=self.config) as sess:
            nlabels = len(self.label_list)
            model_fn = select_model(FLAGS.model_type)
            with tf.device(FLAGS.device_id):
                # The placeholder must exist before the model that consumes it.
                images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                logits = model_fn(nlabels, images, 1, False)
                init = tf.global_variables_initializer()
                requested_step = FLAGS.requested_step if FLAGS.requested_step else None
                checkpoint_path = '%s' % (FLAGS.model_dir)
                model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)
                softmax_output = tf.nn.softmax(logits)
                coder = ImageCoder()
                # Support a batch mode if no face detection model
                files = []  # no face-detection path in this method
                if len(files) == 0:
                    if (os.path.isdir(FLAGS.filename)):
                        for relpath in os.listdir(FLAGS.filename):
                            abspath = os.path.join(FLAGS.filename, relpath)
                            
                            if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
                                print(abspath)
                                files.append(abspath)
                    else:
                        files.append(FLAGS.filename)
                        # If it happens to be a list file, read the list and clobber the files
                        if any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
                            files = list_images(FLAGS.filename)
                writer = None
                output = None
                if FLAGS.target:
                    print('Creating output file %s' % FLAGS.target)
                    output = open(FLAGS.target, 'w')
                    writer = csv.writer(output)
                    writer.writerow(('file', 'label', 'score'))
                image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
                print(image_files)
                if FLAGS.single_look:
                    classify_many_single_crop(sess, self.label_list, softmax_output, coder, images, image_files, writer)

                else:
                    for image_file in image_files:
                        classify_one_multi_crop(sess, self.label_list, softmax_output, coder, images, image_file, writer)

                if output is not None:
                    output.close()
Example #9
def guessGender(path):  # pylint: disable=unused-argument
    # Detect the gender of every photo in a folder.

    with tf.Session() as sess:

        # tf.reset_default_graph()
        label_list = GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()
            files = get_files(path)
            gender_dict = {}

            try:
                for f in files:
                    best_choice = classify(sess, label_list, softmax_output, coder, images, f)
                    # print(best_choice)
                    gender_dict[f[len(path) + 1:]] = best_choice
                # Return the mapping for the whole folder, not just the first file.
                return gender_dict

            except Exception as e:
                print(e)
                print('Failed to run image %s ' % f)
Example #10
def guessAge(image_file):

    # important!!! Fix the bug http://stackoverflow.com/questions/33765336/remove-nodes-from-graph-or-reset-entire-default-graph
    tf.reset_default_graph()
    with tf.Session() as sess:

        age_label_list = AGE_LIST
        agelabels = len(age_label_list)

        # print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model('inception')

        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits_age = model_fn(agelabels, images, 1, False)
        init = tf.global_variables_initializer()

        requested_step = FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (AGE_MODEL_PATH)
        # update in 0.11 version

        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)
        #print 'model_checkpoint_path is', model_checkpoint_path
        saver = tf.train.Saver()
        if not saver.last_checkpoints:
            saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits_age)

        coder = ImageCoder()

        files = []

        # detect age; everything stays inside the with-block so the session
        # is still open when classify runs (the session closes on exit).
        best_choice = classify(sess, age_label_list, softmax_output, coder, images,
                               image_file)

        return best_choice
Example #11
def main(argv=None):  # pylint: disable=unused-argument

    with tf.Session() as sess:

        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits = model_fn(nlabels, images, 1, False)
        init = tf.global_variables_initializer()

        requested_step = FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (FLAGS.model_dir)

        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)

        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits)

        coder = ImageCoder()

        files = []

        if FLAGS.face_detection_model:
            print('Using face detector %s' % FLAGS.face_detection_model)
            face_detect = FaceDetector(FLAGS.face_detection_model)
            face_files, rectangles = face_detect.run(FLAGS.filename)
            files += face_files

        if len(files) == 0:
            files.append(FLAGS.filename)

        for f in files:
            classify(sess, label_list, softmax_output, coder, images, f)
Example #12
def guessGender(image_file):
    tf.reset_default_graph()
    with tf.Session() as sess:

        #sess = tf.Session()
        age_label_list = AGE_LIST
        gender_label_list = GENDER_LIST
        genderlabels = len(gender_label_list)

        # print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model('')

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits_gender = model_fn(genderlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits_gender)

            coder = ImageCoder()

            files = []

            # detect gender
            #try:
            best_choice = classifyGender(sess, gender_label_list,
                                         softmax_output, coder, images,
                                         image_file)
            return best_choice
Example #13
    def __init__(self, model: nn.Module, checkpoint, image_net, random_samples,
                 indices, images_dir, use_cpu, name, image_resize, **kwargs):
        self.checkpoint = checkpoint
        self.image_net = image_net
        self.random_samples = random_samples
        self.indices = indices
        self.image_resize = image_resize

        self.device = common.get_device(cpu_force=use_cpu)
        self.model = model
        self.images_dir = images_dir
        self.use_cpu = use_cpu
        self.name = name

        ch = get_checkpoint(checkpoint, image_net)
        if ch is not None:
            model.load_state_dict(ch, strict=False)
        else:
            print('Using random weights!')

        self.dataset = BasicDataset(labels_csv=dataset_path(
            prop('datasets.test')),
                                    transforms=self.get_image_transforms(),
                                    img_dir=self.images_dir)

        self.mask_loader = MaskLoader(preset_attr=self.dataset.labels(),
                                      ts=self.get_image_transforms())

        self.label_to_index_total = {}
        for i, name in enumerate(self.dataset.labels()):
            self.label_to_index_total[name] = i

        if self.random_samples is None and self.indices is None:
            raise AttributeError(
                'Expected one of `indices` or `random_samples`')

        if self.indices is None and self.random_samples is not None:
            # np.random.random_integers is deprecated; randint's exclusive
            # upper bound covers the same range.
            self.indices = np.random.randint(low=0,
                                             high=len(self.dataset),
                                             size=self.random_samples)
Example #14
def guessGender(image_file):
    with tf.Session() as sess:

        age_label_list = AGE_LIST
        gender_label_list = GENDER_LIST
        genderlabels = len(gender_label_list)

        # print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model('inception')

        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits_gender = model_fn(genderlabels, images, 1, False)
        init = tf.global_variables_initializer()

        requested_step = FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (FLAGS.model_dir)

        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)

        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits_gender)

        coder = ImageCoder()

        files = []

        # detect gender
        try:
            best_choice = classify(sess, gender_label_list, softmax_output,
                                   coder, images, image_file)
            return best_choice
        except Exception as e:
            print(e)
            print('Failed to run image %s ' % image_file)
Example #15
def model_init(sess, model_path, label_list):

    model_checkpoint_path, global_step = get_checkpoint(
        model_path, None, FLAGS.checkpoint)

    nlabels = len(label_list)
    model_fn = select_model(FLAGS.model_type)
    images_placeholder = tf.placeholder(tf.float32,
                                        [None, RESIZE_FINAL, RESIZE_FINAL, 3])

    with tf.device(FLAGS.device_id):

        logits = model_fn(nlabels, images_placeholder, 1, False)

        init = tf.global_variables_initializer()

        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits)

        return softmax_output, images_placeholder
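A sketch of how model_init might be driven, assuming a session on the default graph and FLAGS.model_dir as the checkpoint directory (the zero batch is a stand-in for a real preprocessed face crop):

import numpy as np

with tf.Session() as sess:
    softmax_output, images_placeholder = model_init(sess, FLAGS.model_dir, GENDER_LIST)
    batch = np.zeros((1, RESIZE_FINAL, RESIZE_FINAL, 3), dtype=np.float32)  # stand-in input
    probs = sess.run(softmax_output, feed_dict={images_placeholder: batch})
    print(GENDER_LIST[int(probs[0].argmax())], probs[0].max())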
Example #16
    def __init__(self, session_conf, class_type, model_dir, requested_step='', model_type='inception'):
        self.graph = tf.Graph()  # create a separate graph for each class (instance)
        self.session_conf = session_conf
        self.sess = tf.Session(graph=self.graph, config=self.session_conf)  # create a new session
        self.label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
        self.model_dir = model_dir
        self.requested_step = requested_step
        with self.sess.as_default():
            with self.graph.as_default():
                nlabels = len(self.label_list)
                # `device_id` and `checkpoint` are module-level settings in this snippet's source
                print('Executing on %s' % device_id)
                model_fn = select_model(model_type)
                with tf.device(device_id):
                    self.images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                    logits = model_fn(nlabels, self.images, 1, False)
                    init = tf.global_variables_initializer()
                    requested_step = self.requested_step if self.requested_step else None
                    checkpoint_path = '%s' % (self.model_dir)
                    ###### gender_model_dir
                    model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, checkpoint)
                    self.saver = tf.train.Saver()
                    self.saver.restore(self.sess, model_checkpoint_path)  # restore parameters from the checkpoint
                    self.softmax_output = tf.nn.softmax(logits)
                    self.coder = ImageCoder()
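Giving each instance its own tf.Graph, as this class does, is what lets an age model and a gender model be restored in one process without variable-name collisions. Hypothetical usage (the class name Guesser, the checkpoint paths, and `batch` are assumptions):

age_guesser = Guesser(session_conf, 'age', '/path/to/age_checkpoint')
gender_guesser = Guesser(session_conf, 'gender', '/path/to/gender_checkpoint')
# batch: float array of shape [n, RESIZE_FINAL, RESIZE_FINAL, 3]
# Each call runs against that instance's private graph and session.
age_probs = age_guesser.sess.run(age_guesser.softmax_output,
                                 feed_dict={age_guesser.images: batch})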
Example #17
def main(argv=None):
    with tf.Graph().as_default():
        serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']

        images = tf.map_fn(preproc_jpeg, jpegs, dtype=tf.float32)
        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=config) as sess:
            model_fn = select_model(FLAGS.model_type)
            logits = model_fn(nlabels, images, 1, False)
            softmax_output = tf.nn.softmax(logits)
            values, indices = tf.nn.top_k(
                softmax_output, 2 if FLAGS.class_type == 'age' else 1)
            class_tensor = tf.constant(label_list)
            table = tf.contrib.lookup.index_to_string_table_from_tensor(
                class_tensor)
            classes = table.lookup(tf.to_int64(indices))
            requested_step = FLAGS.requested_step if FLAGS.requested_step else None
            checkpoint_path = '%s' % (FLAGS.model_dir)
            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)
            print('Restored model checkpoint %s' % model_checkpoint_path)

            output_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(str(FLAGS.model_version)))
            print('Exporting trained model to %s' % output_path)
            builder = tf.saved_model.builder.SavedModelBuilder(output_path)

            # Build the signature_def_map.
            classify_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                serialized_tf_example)
            classes_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                classes)
            scores_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                values)
            classification_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={
                        tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                        classify_inputs_tensor_info
                    },
                    outputs={
                        tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
                        classes_output_tensor_info,
                        tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
                        scores_output_tensor_info
                    },
                    method_name=tf.saved_model.signature_constants.
                    CLASSIFY_METHOD_NAME))

            predict_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                jpegs)
            prediction_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={'images': predict_inputs_tensor_info},
                    outputs={
                        'classes': classes_output_tensor_info,
                        'scores': scores_output_tensor_info
                    },
                    method_name=tf.saved_model.signature_constants.
                    PREDICT_METHOD_NAME))

            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    'predict_images':
                    prediction_signature,
                    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    classification_signature,
                },
                legacy_init_op=legacy_init_op)

            builder.save()
            print('Successfully exported model to %s' % FLAGS.output_dir)
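Once exported, the SavedModel can be smoke-tested with the stock TF1 loader; the signature names match those registered above ('face.jpg' is an illustrative input):

with tf.Session(graph=tf.Graph()) as sess:
    meta = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], output_path)
    sig = meta.signature_def['predict_images']
    jpegs_name = sig.inputs['images'].name
    classes_name = sig.outputs['classes'].name
    with open('face.jpg', 'rb') as f:
        print(sess.run(classes_name, feed_dict={jpegs_name: [f.read()]}))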
Example #18
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (FLAGS.model_dir)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()

            # Support a batch mode if no face detection model
            if len(files) == 0:
                if (os.path.isdir(FLAGS.filename)):
                    for relpath in os.listdir(FLAGS.filename):
                        abspath = os.path.join(FLAGS.filename, relpath)

                        if os.path.isfile(abspath) and any([
                                abspath.endswith('.' + ty)
                                for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')
                        ]):
                            print(abspath)
                            files.append(abspath)
Example #19
def main(argv=None):  # pylint: disable=unused-argument


    with tf.Session() as sess:

        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits = model_fn(nlabels, images, 1, False)
        init = tf.global_variables_initializer()
            
        requested_step = FLAGS.requested_step if FLAGS.requested_step else None
        
        checkpoint_path = '%s' % (FLAGS.model_dir)

        model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            
        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)
                        
        softmax_output = tf.nn.softmax(logits)

        coder = ImageCoder()

        files = []

        if FLAGS.face_detection_model:
            print('Using face detector %s' % FLAGS.face_detection_model)
            face_detect = FaceDetector(FLAGS.face_detection_model)
            face_files, rectangles = face_detect.run(FLAGS.filename)
            files += face_files

        # Support a batch mode if no face detection model
        if len(files) == 0:
            files.append(FLAGS.filename)
            # If it happens to be a list file, read the list and clobber the files
            if one_of(FLAGS.filename, ('csv', 'tsv', 'txt')):
                files = batchlist(FLAGS.filename)

        writer = None
        output = None
        if FLAGS.target:
            print('Creating output file %s' % FLAGS.target)
            output = open(FLAGS.target, 'w')
            writer = csv.writer(output)
            writer.writerow(('file', 'label', 'score'))


        for f in files:
            image_file = resolve_file(f)
            
            if image_file is None: continue

            try:
                best_choice = classify(sess, label_list, softmax_output, coder, images, image_file)
                if writer is not None:
                    writer.writerow((f, best_choice[0], '%.2f' % best_choice[1]))
            except Exception as e:
                print(e)
                print('Failed to run image %s ' % image_file)

        if output is not None:
            output.close()
Example #20
def eval_once(saver,
              summary_writer,
              summary_op,
              agelogits,
              agelabels,
              genderlogits,
              genderlabels,
              num_eval,
              saveresultdir,
              requested_step=None):
    # Run eval once.
    # saver: Saver.
    # summary_writer: summary writer.
    # summary_op: summary op.
    agetop1 = tf.nn.in_top_k(agelogits, agelabels, 1)
    agetop2 = tf.nn.in_top_k(agelogits, agelabels, 2)

    gendertop1 = tf.nn.in_top_k(genderlogits, genderlabels, 1)

    with tf.Session() as sess:
        checkpoint_path = FLAGS.model_dir
        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)
        saver.restore(sess, model_checkpoint_path)

        # start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))

            num_steps = int(math.ceil(num_eval / FLAGS.batch_size))
            agetrue_count1 = agetrue_count2 = gendertrue_count1 = 0
            total_sample_count = num_steps * FLAGS.batch_size
            step = 0

            while step < num_steps and not coord.should_stop():
                start_time = time.time()
                agev, agepredictions1, agepredictions2, genderv, genderpredictions1 = sess.run(
                    [agelogits, agetop1, agetop2, genderlogits, gendertop1])
                duration = time.time() - start_time
                sec_per_batch = float(duration)
                examples_per_sec = FLAGS.batch_size / sec_per_batch

                agetrue_count1 += np.sum(agepredictions1)
                agetrue_count2 += np.sum(agepredictions2)
                gendertrue_count1 += np.sum(genderpredictions1)
                format_str = ('%s (%.1f examples/sec; %.3f sec/batch)')
                print(format_str %
                      (datetime.now(), examples_per_sec, sec_per_batch))

                step += 1

            # compute precision @ 1
            ageprecision1 = agetrue_count1 / total_sample_count
            ageprecision2 = agetrue_count2 / total_sample_count
            genderprecision1 = gendertrue_count1 / total_sample_count
            print('Age => %s: precision @ 1 = %.3f (%d/%d)' %
                  (datetime.now(), ageprecision1, agetrue_count1,
                   total_sample_count))
            print('Age => %s: precision @ 2 = %.3f (%d/%d)' %
                  (datetime.now(), ageprecision2, agetrue_count2,
                   total_sample_count))
            print('Gender => %s: precision @ 1 = %.3f (%d/%d)' %
                  (datetime.now(), genderprecision1, gendertrue_count1,
                   total_sample_count))

            resulttxt = saveresultdir + os.sep + FLAGS.eval_data + '_result.txt'
            with open(resulttxt, 'w') as f:
                f.write('Age => %s: precision @ 1 = %.3f (%d/%d) \n' %
                        (datetime.now(), ageprecision1, agetrue_count1,
                         total_sample_count))
                f.write('Age => %s: precision @ 2 = %.3f (%d/%d) \n' %
                        (datetime.now(), ageprecision2, agetrue_count2,
                         total_sample_count))
                f.write('Gender => %s: precision @ 1 = %.3f (%d/%d) \n' %
                        (datetime.now(), genderprecision1, gendertrue_count1,
                         total_sample_count))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Age Precision @ 1',
                              simple_value=ageprecision1)
            summary.value.add(tag='Age Precision @ 2',
                              simple_value=ageprecision2)
            summary.value.add(tag='Gender Precision @ 1',
                              simple_value=genderprecision1)
            summary_writer.add_summary(summary, global_step)

        except Exception as e:
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
Example #21
def eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step=None):
    """Run Eval once.
    Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
    """
    top1 = tf.nn.in_top_k(logits, labels, 1)
    top2 = tf.nn.in_top_k(logits, labels, 2)

    with tf.Session() as sess:
        checkpoint_path = '%s/run-%d' % (FLAGS.train_dir, FLAGS.run_id)

        model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)

        saver.restore(sess, model_checkpoint_path)

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                 start=True))
            num_steps = int(math.ceil(num_eval / FLAGS.batch_size))
            true_count1 = true_count2 = 0
            total_sample_count = num_steps * FLAGS.batch_size
            step = 0
            print(FLAGS.batch_size, num_steps)

            while step < num_steps and not coord.should_stop():
                start_time = time.time()
                v, predictions1, predictions2 = sess.run([logits, top1, top2])
                duration = time.time() - start_time
                sec_per_batch = float(duration)
                examples_per_sec = FLAGS.batch_size / sec_per_batch

                true_count1 += np.sum(predictions1)
                true_count2 += np.sum(predictions2)
                format_str = ('%s (%.1f examples/sec; %.3f sec/batch)')
                print(format_str % (datetime.now(),
                                    examples_per_sec, sec_per_batch))

                step += 1

            # Compute precision @ 1.

            precision1 = true_count1 / total_sample_count
            precision2 = true_count2 / total_sample_count
            print('%s: precision @ 1 = %.3f (%d/%d)' % (datetime.now(), precision1, true_count1, total_sample_count))
            print('%s: precision @ 2 = %.3f (%d/%d)' % (datetime.now(), precision2, true_count2, total_sample_count))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Precision @ 1', simple_value=precision1)
            summary.value.add(tag='Precision @ 2', simple_value=precision2)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
Example #22
def detectUndetectedPersons(outfile, undetected_persons):
    #RUDE CARNIE DEFAULTS

    print("starting the process to detect people's age and gender...")

    gender_model_dir = "./age_and_gender_detection/pretrained_checkpoints/gender/"
    age_model_dir = "./age_and_gender_detection/pretrained_checkpoints/age/"
    # What processing unit to execute inference on
    device_id = '/device:GPU:0'
    # Checkpoint basename
    checkpoint = 'checkpoint'
    model_type = 'inception'

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    config = tf.ConfigProto(allow_soft_placement=True)
    # gpu_options=gpu_options)
    # config = tf.ConfigProto(allow_soft_placement=True)

    with tf.Session(config=config) as sess:
        #Age detection model
        n_ages = len(AGE_LIST)
        age_model_fn = select_model(model_type)

        #Gender detection model
        n_genders = len(GENDER_LIST)
        gender_model_fn = select_model(model_type)

        print("initializing the model to detect age and gender")

        with tf.device(device_id):
            print "initializing the model to detect age and gender using ", str(
                device_id)
            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            requested_step = None
            init = tf.global_variables_initializer()

            #age model
            age_logits = age_model_fn("age", n_ages, images, 1, False)
            age_checkpoint_path, global_step = get_checkpoint(
                age_model_dir, requested_step, checkpoint)
            age_vars = set(tf.global_variables())
            saver_age = tf.train.Saver(list(age_vars))
            saver_age.restore(sess, age_checkpoint_path)
            age_softmax_output = tf.nn.softmax(age_logits)

            #gender_model
            gender_logits = gender_model_fn("gender", n_genders, images, 1,
                                            False)
            gender_checkpoint_path, global_step = get_checkpoint(
                gender_model_dir, requested_step, checkpoint)
            gender_vars = set(tf.global_variables()) - age_vars
            saver_gender = tf.train.Saver(list(gender_vars))
            saver_gender.restore(sess, gender_checkpoint_path)
            gender_softmax_output = tf.nn.softmax(gender_logits)

            coder = ImageCoder()

            writer = None

            print(
                "starting the loop for detecting age and gender in each frame")
            time.sleep(
                15)  # sleep to allow the tensor flow/rude carnie stuff to load
            for person_name, person_img in undetected_persons:
                outfile.write("%s%s%s"%(person_name, getAgeAndGender(person_name, person_img, sess, coder, images,\
                        writer, AGE_LIST, GENDER_LIST, age_softmax_output,\
                        gender_softmax_output), '\n'))
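The variable bookkeeping above (gender_vars = set(tf.global_variables()) - age_vars) is the trick that lets two independently trained checkpoints be restored into one graph: each tf.train.Saver is handed only the variables its model created. A minimal standalone sketch of the same pattern (variable and checkpoint names are illustrative):

import tensorflow as tf

w_a = tf.get_variable('model_a/w', shape=[2])
vars_a = set(tf.global_variables())
saver_a = tf.train.Saver(list(vars_a))          # restores model A's variables only

w_b = tf.get_variable('model_b/w', shape=[2])
vars_b = set(tf.global_variables()) - vars_a
saver_b = tf.train.Saver(list(vars_b))          # restores model B's variables only

with tf.Session() as sess:
    pass  # saver_a.restore(sess, ckpt_a); saver_b.restore(sess, ckpt_b)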
Example #23
def construct(filename,
              class_type,
              model_type,
              model_dir,
              checkpoint='checkpoint',
              device='/cpu:0',
              target=None,
              classes=None):  # pylint: disable=unused-argument
    # sys.stdout = os.devnull
    # sys.stderr = os.devnull
    files = []
    with tf.Graph().as_default():
        with tf.Session() as sess:
            #with tf.Session() as sess:
            #print('\n111111111111111\n')
            #tf.reset_default_graph()
            label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
            nlabels = len(label_list)
            #print('\n222222222222222\n')
            #print('Executing on %s' % FLAGS.device_id)
            model_fn = select_model(model_type)

            with tf.device(device):
                # sys.stdout = sys.__stdout__
                # sys.stderr = sys.__stderr__
                images = tf.placeholder(tf.float32,
                                        [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                logits = model_fn(nlabels, images, 1, False)
                init = tf.global_variables_initializer()
                #print('\n333333333333333\n')
                requested_step = None

                checkpoint_path = '%s' % (model_dir)

                model_checkpoint_path, global_step = get_checkpoint(
                    checkpoint_path, requested_step, checkpoint)
                #print("\nglobal_step=",global_step)
                saver = tf.train.Saver()
                #print('\n44444444444444444\n')
                #print("PATH=",model_checkpoint_path,'\n')
                saver.restore(sess, model_checkpoint_path)
                #print('\n55555555555555555\n')
                softmax_output = tf.nn.softmax(logits)

                coder = ImageCoder()

                # Support a batch mode if no face detection model
                if len(files) == 0:
                    files.append(filename)
                    # If it happens to be a list file, read the list and clobber the files
                    if one_of(filename, ('csv', 'tsv', 'txt')):
                        files = batchlist(filename)

                for it, f in enumerate(files):
                    image_file = resolve_file(f)

                    if image_file is None: continue

                    try:
                        best_choice = classify(sess, label_list,
                                               softmax_output, coder, images,
                                               image_file)
                        #results[it][0]=f
                        #print('f=%s\nresult='%f,results)
                        print("\nClass_type=", class_type)
                        if class_type == 'age':
                            #print("\n%s\n"%it)
                            classes[it].age = best_choice[0]
                            # print(best_choice[0],'\n')
                            # print(results,'\n')
                            target.writerow(
                                (f, classes[it].gender, classes[it].age,
                                 '%.2f' % best_choice[1]))
                        if class_type == 'gender':
                            #print("\n222222222\n")
                            classes[it].name = f
                            classes[it].gender = best_choice[0]
                    except Exception as e:
                        print(e)
                        print('Failed to run image %s ' % image_file)
                #if output is not None:
                #    output.close()
                # print(results)
                sess.close()
Example #24
def main(argv=None):  # pylint: disable=unused-argument

    files = []

    if FLAGS.face_detection_model:
        print('Using face detector (%s) %s' %
              (FLAGS.face_detection_type, FLAGS.face_detection_model))
        face_detect = face_detection_model(FLAGS.face_detection_type,
                                           FLAGS.face_detection_model)
        face_files, rectangles = face_detect.run(FLAGS.filename)
        print(face_files)
        files += face_files

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:

        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (FLAGS.model_dir)

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()

            # Support a batch mode if no face detection model
            if len(files) == 0:
                if (os.path.isdir(FLAGS.filename)):
                    for relpath in os.listdir(FLAGS.filename):
                        abspath = os.path.join(FLAGS.filename, relpath)

                        if os.path.isfile(abspath) and any([
                                abspath.endswith('.' + ty)
                                for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')
                        ]):
                            print(abspath)
                            files.append(abspath)
                else:
                    files.append(FLAGS.filename)
                    # If it happens to be a list file, read the list and clobber the files
                    if any([
                            FLAGS.filename.endswith('.' + ty)
                            for ty in ('csv', 'tsv', 'txt')
                    ]):
                        files = list_images(FLAGS.filename)

            writer = None
            output = None
            if FLAGS.target:
                print('Creating output file %s' % FLAGS.target)
                output = open(FLAGS.target, 'w')
                writer = csv.writer(output)
                writer.writerow(('file', 'label', 'score'))
            image_files = list(
                filter(lambda x: x is not None,
                       [resolve_file(f) for f in files]))
            print(image_files)
            if FLAGS.single_look:
                classify_many_single_crop(sess, label_list, softmax_output,
                                          coder, images, image_files, writer)

            else:
                for image_file in image_files:
                    print('image_file :', image_file)
                    print('images : ', images)
                    classify_one_multi_crop(sess, label_list, softmax_output,
                                            coder, images, image_file, writer)

            if output is not None:
                output.close()
Example #25
def gender(argv=None):  # pylint: disable=unused-argument
    global isRunRecog
    global trackers
    global isNotStop
    time.sleep(4)
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:

        label_list = GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % ('21936/')

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()  # needed below by coder.decode_jpeg
            output = None

            ageListLocal = []
            #while not isRunRecog :
            #pass
            while isNotStop:
                try:
                    for index, track in enumerate(trackers):
                        sub_img = track.path.copy()
                        image_data = cv2.imencode('.jpg',
                                                  sub_img)[1].tostring()
                        image1 = coder.decode_jpeg(image_data)
                        crops = []
                        h = image1.shape[0]
                        w = image1.shape[1]
                        hl = h - RESIZE_FINAL
                        wl = w - RESIZE_FINAL

                        crop = tf.image.resize_images(
                            sub_img, (RESIZE_FINAL, RESIZE_FINAL))
                        crops.append(standardize_image(crop))
                        crops.append(tf.image.flip_left_right(crop))

                        corners = [(0, 0), (0, wl), (hl, 0), (hl, wl),
                                   (int(hl / 2), int(wl / 2))]
                        for corner in corners:
                            ch, cw = corner
                            cropped = tf.image.crop_to_bounding_box(
                                image1, ch, cw, RESIZE_FINAL, RESIZE_FINAL)
                            crops.append(standardize_image(cropped))
                            flipped = tf.image.flip_left_right(cropped)
                            crops.append(standardize_image(flipped))

                        image_batch = tf.stack(crops)

                        batch_results = sess.run(
                            softmax_output,
                            feed_dict={images: image_batch.eval()})
                        output = batch_results[0]
                        batch_sz = batch_results.shape[0]

                        for i in range(1, batch_sz):
                            output = output + batch_results[i]

                        output /= batch_sz
                        best = np.argmax(output)
                        best_choice = (label_list[best], output[best])
                        print('Guess @ 1 %s, prob = %.2f' % best_choice)
                        track.gender = best_choice
                except:
                    images = tf.placeholder(
                        tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                    logits = model_fn(nlabels, images, 1, False)
                    init = tf.global_variables_initializer()

                    requested_step = FLAGS.requested_step if FLAGS.requested_step else None

                    checkpoint_path = '%s' % ('21936/')

                    model_checkpoint_path, global_step = get_checkpoint(
                        checkpoint_path, requested_step, FLAGS.checkpoint)

                    saver = tf.train.Saver()
                    saver.restore(sess, model_checkpoint_path)

                    softmax_output = tf.nn.softmax(logits)
Example #26
def eval_once(saver,
              summary_writer,
              summary_op,
              logits,
              labels,
              num_eval,
              requested_step=None,
              name=''):
    """Run Eval once.
    Args:
    saver: Saver.
    summary_writer: Summary writer.
    top_k_op: Top K op.
    summary_op: Summary op.
    """
    top1 = tf.nn.in_top_k(logits, labels, 1)
    top2 = tf.nn.in_top_k(logits, labels, 2)
    correct_predict = tf.equal(tf.to_int32(tf.argmax(logits, 1)), labels)
    is_male = tf.equal(labels, 0)
    is_female = tf.equal(labels, 1)
    male_true = tf.reduce_sum(
        tf.to_float(tf.logical_and(is_male, correct_predict)))
    female_true = tf.reduce_sum(
        tf.to_float(tf.logical_and(is_female, correct_predict)))
    male_num = tf.reduce_sum(tf.to_float(is_male))
    female_num = tf.reduce_sum(tf.to_float(is_female))

    with tf.Session() as sess:
        checkpoint_path = '%s/run-%d' % (FLAGS.train_dir, FLAGS.run_id)

        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)

        saver.restore(sess, model_checkpoint_path)

        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess,
                                      coord=coord,
                                      daemon=True,
                                      start=True))
            num_steps = int(math.ceil(num_eval / FLAGS.batch_size))
            true_count1 = true_count2 = 0
            m_true_count = f_true_count = 0
            total_m = total_f = 0
            total_sample_count = num_steps * FLAGS.batch_size
            step = 0
            print(FLAGS.batch_size, num_steps)

            while step < num_steps and not coord.should_stop():
                start_time = time.time()
                v, predictions1, predictions2, m_true, m_num, f_true, f_num \
                     = sess.run([logits, top1, top2, male_true, male_num, female_true, female_num])

                duration = time.time() - start_time
                sec_per_batch = float(duration)
                examples_per_sec = FLAGS.batch_size / sec_per_batch

                true_count1 += np.sum(predictions1)
                true_count2 += np.sum(predictions2)
                m_true_count += m_true
                f_true_count += f_true
                total_m += m_num
                total_f += f_num
                format_str = ('%s (%.1f examples/sec; %.3f sec/batch)')
                print(format_str %
                      (datetime.now(), examples_per_sec, sec_per_batch))

                step += 1

            # Compute precision @ 1.

            precision1 = true_count1 / total_sample_count
            precision2 = true_count2 / total_sample_count
            precision_m = m_true_count / total_m
            precision_f = f_true_count / total_f
            print(
                '%s: precision @ 1 = %.3f (%d/%d)' %
                (datetime.now(), precision1, true_count1, total_sample_count))
            print(
                '%s: precision @ 2 = %.3f (%d/%d)' %
                (datetime.now(), precision2, true_count2, total_sample_count))
            print('%s: precision @ m = %.3f (%d/%d)' %
                  (datetime.now(), precision_m, m_true_count, total_m))
            print('%s: precision @ f = %.3f (%d/%d)' %
                  (datetime.now(), precision_f, f_true_count, total_f))

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Precision @ 1' + name,
                              simple_value=precision1)
            summary.value.add(tag='Precision @ 2' + name,
                              simple_value=precision2)
            summary.value.add(tag='Precision @ m' + name,
                              simple_value=precision_m)
            summary.value.add(tag='Precision @ f' + name,
                              simple_value=precision_f)
            summary.value.add(tag='Precision @ mean' + name,
                              simple_value=(precision_f + precision_m) / 2)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)

        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
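
The per-gender bookkeeping above is easy to sanity-check outside of TensorFlow. Below is a minimal NumPy sketch of the same accumulation logic for a single batch; it assumes, as the is_male/is_female ops do, that label 0 is male and label 1 is female, and the function name and toy data are illustrative only:

import numpy as np

def precision_report(logits, labels):
    # Analogue of correct_predict: compare top-1 predictions to labels.
    preds = np.argmax(logits, axis=1)
    correct = preds == labels
    is_male = labels == 0
    is_female = labels == 1
    male_true = np.sum(correct & is_male)      # analogue of male_true
    female_true = np.sum(correct & is_female)  # analogue of female_true
    return {
        'precision @ 1': correct.mean(),
        'precision @ m': male_true / max(1, is_male.sum()),
        'precision @ f': female_true / max(1, is_female.sum()),
    }

# Toy batch: three male examples (label 0), one female (label 1).
logits = np.array([[2.0, 0.1], [0.3, 1.5], [1.2, 0.4], [0.2, 0.9]])
labels = np.array([0, 0, 0, 1])
print(precision_report(logits, labels))
# precision @ 1 = 0.75, precision @ m = 2/3, precision @ f = 1/1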
Example #27
0
def main(argv=None):  # pylint: disable=unused-argument

    # FIXME: Test by putting multiple files in here.
    # files = []
    files = load_imgs(
        '/Users/parimarjann/projects/face_recognizer/data/vgg_face_dataset/dataset_images'
    )
    random.seed(1234)
    files = random.sample(files, 100)
    print('single look: ', FLAGS.single_look)

    if FLAGS.face_detection_model:
        print('Using face detector (%s) %s' %
              (FLAGS.face_detection_type, FLAGS.face_detection_model))
        face_detect = face_detection_model(FLAGS.face_detection_type,
                                           FLAGS.face_detection_model)
        face_files, rectangles = face_detect.run(FLAGS.filename)
        print(face_files)
        files += face_files

    with tf.Session() as sess:

        #tf.reset_default_graph()
        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (FLAGS.model_dir)

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()
            # Support a batch mode if no face detection model
            if len(files) == 0:
                files.append(FLAGS.filename)
                # If it happens to be a list file, read the list and clobber the files
                if one_of(FLAGS.filename, ('csv', 'tsv', 'txt')):
                    files = batchlist(FLAGS.filename)

            writer = None
            output = None
            if FLAGS.target:
                print('Creating output file %s' % FLAGS.target)
                output = open(FLAGS.target, 'w')
                writer = csv.writer(output)
                writer.writerow(('file', 'label', 'score'))

            for f in files:
                image_file = resolve_file(f)
                if image_file is None:
                    continue

                # Read with OpenCV, swap channels, and re-encode to JPEG bytes
                # for the ImageCoder.
                image_data = cv2.imread(image_file)
                image_data = cv2.cvtColor(image_data, cv2.COLOR_RGB2BGR)
                image_data = cv2.imencode('.jpeg', image_data)[1].tobytes()

                try:
                    best_choice = classify(sess, label_list, softmax_output,
                                           coder, images, image_data)
                    # if writer is not None:
                    #     writer.writerow((f, best_choice[0], '%.2f' % best_choice[1]))
                    print(f)
                    print(best_choice)
                except Exception as e:
                    print('exception!')
                    print(e)
                    print('Failed to run image %s ' % image_file)

            if output is not None:
                output.close()
Example #28
0
def main(argv=None):  # pylint: disable=unused-argument

    files = []
    
    if FLAGS.face_detection_model:
        print('Using face detector (%s) %s' % (FLAGS.face_detection_type, FLAGS.face_detection_model))
        face_detect = face_detection_model(FLAGS.face_detection_type, FLAGS.face_detection_model)
        face_files, rectangles = face_detect.run(FLAGS.filename)
        print(face_files)
        files += face_files


    with tf.Session() as sess:

        #tf.reset_default_graph()
        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):
            
            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()
            
            requested_step = FLAGS.requested_step if FLAGS.requested_step else None
        
            checkpoint_path = '%s' % (FLAGS.model_dir)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)
                        
            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()

            # Support a batch mode if no face detection model
            if len(files) == 0:
                files.append(FLAGS.filename)
                # If it happens to be a list file, read the list and clobber the files
                if one_of(FLAGS.filename, ('csv', 'tsv', 'txt')):
                    files = batchlist(FLAGS.filename)

            writer = None
            output = None
            if FLAGS.target:
                print('Creating output file %s' % FLAGS.target)
                output = open(FLAGS.target, 'w')
                writer = csv.writer(output)
                writer.writerow(('file', 'label', 'score'))


            for f in files:
                image_file = resolve_file(f)
            
                if image_file is None:
                    continue

                try:
                    best_choice = classify(sess, label_list, softmax_output, coder, images, image_file)
                    if writer is not None:
                        writer.writerow((f, best_choice[0], '%.2f' % best_choice[1]))
                except Exception as e:
                    print(e)
                    print('Failed to run image %s ' % image_file)

            if output is not None:
                output.close()
Example #29
0
    def predict(self,
                image_file=None,
                mode=0,
                image_bound=None,
                use_tf_to_read=True):
        model_dir = self.model_list[mode]
        class_type = self.class_list[mode]
        files = []

        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=config) as sess:

            label_list = self.AGE_LIST if class_type == 'age' else self.GENDER_LIST
            nlabels = len(label_list)

            model_fn = select_model(self.model_type)

            with tf.device(self.device_id):

                images = tf.placeholder(
                    tf.float32,
                    [None, self.RESIZE_FINAL, self.RESIZE_FINAL, 3])
                logits = model_fn(nlabels, images, 1, False)
                # init = tf.global_variables_initializer()

                requested_step = self.requested_step if self.requested_step else None

                checkpoint_path = '%s' % (model_dir)

                model_checkpoint_path, global_step = get_checkpoint(
                    checkpoint_path, requested_step, self.checkpoint)

                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)

                softmax_output = tf.nn.softmax(logits)

                coder = ImageCoder()

                # Support a batch mode if no face detection model
                # if len(files) == 0:
                #     if (os.path.isdir(filename)):
                #         for relpath in os.listdir(filename):
                #             abspath = os.path.join(filename, relpath)
                #             print(abspath)
                #             if os.path.isfile(abspath) and any(
                #                     [abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
                #                 print(abspath)
                #                 files.append(abspath)
                #     else:
                #         files.append(filename)
                #         # If it happens to be a list file, read the list and clobber the files
                #         if any([filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
                #             files = self.list_images(filename)

                writer = None
                # output = None
                # if self.target:
                #     output = open(self.target, 'w')
                #     writer = csv.writer(output)
                #     writer.writerow(('file', 'label', 'score'))
                # image_files = list(filter(lambda x: x is not None, [self.resolve_file(f) for f in files]))
                # if self.single_look:
                #     self.classify_many_single_crop(sess, label_list, softmax_output, coder, images, image_files, writer)
                #
                # else:
                # for image_file in filename:
                best_choice, second_choice = self.classify_one_multi_crop(
                    sess,
                    label_list,
                    softmax_output,
                    coder,
                    images,
                    image_file,
                    writer,
                    image_bound=image_bound,
                    use_tf_to_read=use_tf_to_read)

                # if output is not None:
                #     output.close()
        return best_choice, second_choice
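
A hypothetical call site for this predict method; the class name is an assumption, mode selects which entry of model_list/class_list to use, and each returned choice is assumed to be a (label, probability) pair:

# Hedged usage sketch; AgeGenderPredictor is a placeholder class name.
predictor = AgeGenderPredictor()
best, second = predictor.predict(image_file='face.jpg', mode=0)
print('best: %s (%.2f), second: %s (%.2f)' % (best + second))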
Example #30
0
# Creating different graphs
g1 = tf.Graph()
g2 = tf.Graph()

# AGE MODEL
with g1.as_default():
    session = tf.Session(graph=g1)
    with session.as_default():
        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        ageNet = select_model('default')
        logits = ageNet(len(AGE_LIST), images, 1, False)
        requested_step = 14999
        checkpoint_path = '%s' % ('C:/Users/LEE/Desktop/rude-carnie/age_model')
        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, 'checkpoint')
        age_saver = tf.train.Saver()
        age_saver.restore(session, model_checkpoint_path)

# GENDER MODEL
with g2.as_default():
    session2 = tf.Session(graph=g2)
    with session2.as_default():
        images2 = tf.placeholder(tf.float32,
                                 [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        genderNet = select_model('default')
        logits2 = genderNet(len(GENDER_LIST), images2, 1, False)
        requested_step2 = 29999
        checkpoint_path2 = '%s' % (
            'C:/Users/LEE/Desktop/rude-carnie/gender_model')
        model_checkpoint_path2, global_step = get_checkpoint(
            checkpoint_path2, requested_step2, 'checkpoint')
        gender_saver = tf.train.Saver()
        gender_saver.restore(session2, model_checkpoint_path2)
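
With both checkpoints restored, inference runs each session against its own graph. A minimal sketch, assuming image_batch is a float32 array of shape [k, RESIZE_FINAL, RESIZE_FINAL, 3] produced by the usual multi-crop pipeline:

import numpy as np

# Build the softmax ops once, inside their respective graphs.
with g1.as_default():
    age_softmax = tf.nn.softmax(logits)
with g2.as_default():
    gender_softmax = tf.nn.softmax(logits2)

# Each session is already bound to its graph, so plain run() calls work.
age_probs = session.run(age_softmax, feed_dict={images: image_batch})
gender_probs = session2.run(gender_softmax, feed_dict={images2: image_batch})

# Average over crops and pick the most likely class, as the other examples do.
print('age: %s' % AGE_LIST[int(np.argmax(np.mean(age_probs, axis=0)))])
print('gender: %s' % GENDER_LIST[int(np.argmax(np.mean(gender_probs, axis=0)))])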
Example #31
0
def main(argv=None):

	files = []

	#if FLAGS.facedetect_model_type is 'MTCNN':
	#	print('Using face detector %s' %(FLAGS.facedetect_model_type))
	#	img = cv2.imread(FLAGS.filename)
	#	img_resize = cv2.resize(img, (640,480))
	#	img_resize = cv2.cvtColor(img_resize, cv2.COLOR_BGR2RGB)
	#	faceimagesequence, faceimagelocation, faceimagelandmarks, numberoffaces = MTCNNDetectFace(image)

	# Load Model
	if FLAGS.multitask:
		config = tf.ConfigProto(allow_soft_placement=True)
		with tf.Session(config=config) as sess:
			
			age_nlabels = len(AGE_LIST)
			gender_nlabels = len(GENDER_LIST)

			print('Executing on %s ' % FLAGS.device_id)
			model_fn = select_model(FLAGS.model_type)

			with tf.device(FLAGS.device_id):
				images = tf.placeholder(tf.float32, [None,RESIZE_FINAL,RESIZE_FINAL, 3], name='input')
				agelogits, genderlogits = model_fn(age_nlabels,images, gender_nlabels, images, 1, False)
				init = tf.global_variables_initializer()
				requested_step = FLAGS.requested_step if FLAGS.requested_step else None
				checkpoint_path = '%s' % (FLAGS.model_dir)
				model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
				saver = tf.train.Saver()
				saver.restore(sess, model_checkpoint_path)
				softmax_age_output = tf.nn.softmax(agelogits, name='ageoutput')
				softmax_gender_output = tf.nn.softmax(genderlogits, name='genderoutput')
				print(softmax_age_output)
				print(softmax_gender_output)

				coder = ImageCoder()
				#support a batch mode if no face detection model
				if len(files) == 0:
					if (os.path.isdir(FLAGS.filename)):
						for relpath in os.listdir(FLAGS.filename):
							abspath = os.path.join(FLAGS.filename, relpath)

							if os.path.isfile(abspath) and any([abspath.endswith('.'+ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
								print(abspath)
								files.append(abspath)

					else:
						files.append(FLAGS.filename)
						# if it happens to be a list file, read the list and clobber the files
						if any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
							files = list_images(FLAGS.filename)

				writer = None
				output = None
				if FLAGS.resultfile:
					print('Creating output file %s ' % FLAGS.resultfile)
					output = open(FLAGS.resultfile, 'w')
					writer = csv.writer(output)
					writer.writerow(('file', 'label', 'score'))

				image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
				print(image_files)

				if FLAGS.single_look:

					classify_many_single_crop(sess, AGE_LIST, softmax_age_output,
						coder, images, image_files, writer)
					classify_many_single_crop(sess, GENDER_LIST, softmax_gender_output,
						coder, images, image_files, writer)
					
				else:

					for image_file in image_files:
						classify_one_multi_crop(sess, AGE_LIST, softmax_age_output,
							coder, images, image_file, writer)
						classify_one_multi_crop(sess, GENDER_LIST, softmax_gender_output,
							coder, images, image_file, writer)


				if output is not None:
					output.close()


				if FLAGS.convertpb:
					# retrieve the protobuf graph definition
					graph = tf.get_default_graph()
					input_graph_def = graph.as_graph_def()
					output_node_names = 'ageoutput_1,genderoutput_1'
					output_graph_def = graph_util.convert_variables_to_constants(
						sess, #The session is used to retrieve the weights
						input_graph_def, #The graph_def is used to retrieve the nodes
						output_node_names.split(",") #The output node names are used to select the usefull nodes
					)

					# finally we serialize and dump the output graph to the filesystem
					output_pb_file = FLAGS.model_dir+os.sep+FLAGS.model_type+'.pb'
					with tf.gfile.GFile(output_pb_file, "wb") as f:
						f.write(output_graph_def.SerializeToString())
					print("%d ops in the final graph." % len(output_graph_def.node) )

	else:

		config = tf.ConfigProto(allow_soft_placement=True)
		with tf.Session(config=config) as sess:
			
			if FLAGS.class_type == 'Age':
				label_list = AGE_LIST
			elif FLAGS.class_type == 'Gender':
				label_list = GENDER_LIST			
			nlabels = len(label_list)

			print('Executing on %s' % FLAGS.device_id)
			model_fn = select_model(FLAGS.model_type)

			with tf.device(FLAGS.device_id):
				images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3],name='input')
				logits = model_fn(nlabels, images, 1, False)
				init = tf.global_variables_initializer()

				requested_step = FLAGS.requested_step if FLAGS.requested_step else None

				checkpoint_path = '%s' % (FLAGS.model_dir)

				model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)

				saver = tf.train.Saver()
				saver.restore(sess, model_checkpoint_path)

				if FLAGS.class_type == 'Age':
					softmax_output = tf.nn.softmax(logits, name='ageoutput')
				elif FLAGS.class_type == 'Gender':
					softmax_output = tf.nn.softmax(logits, name='genderoutput')

				coder = ImageCoder()

				# Support a batch mode if no face detection model
				if len(files) == 0:

					if (os.path.isdir(FLAGS.filename)):
						for relpath in os.listdir(FLAGS.filename):
							abspath = os.path.join(FLAGS.filename, relpath)

							if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
								print(abspath)
								files.append(abspath)
					else:
						files.append(FLAGS.filename)
						# If it happens to be a list file, read the list and clobber the files
						if any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
							files = list_images(FLAGS.filename)

				writer = None
				output = None
				if FLAGS.resultfile:
					print('Creating output file %s ' % FLAGS.resultfile)
					output = open(FLAGS.resultfile, 'w')
					writer = csv.writer(output)
					writer.writerow(('file', 'label', 'score'))

				image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
				print(image_files)

				if FLAGS.single_look:
					classify_many_single_crop(sess, label_list, softmax_output, coder, images, image_files, writer)
				else:
					for image_file in image_files:
						classify_one_multi_crop(sess, label_list, softmax_output, coder, images, image_file, writer)

				if output is not None:
					output.close()

				if FLAGS.convertpb:
					# retrieve the protobuf graph definition
					graph = tf.get_default_graph()
					input_graph_def = graph.as_graph_def()
					if FLAGS.class_type == 'Age':
						output_node_names = 'ageoutput'
					elif FLAGS.class_type == 'Gender':
						output_node_names = 'genderoutput'

					output_graph_def = graph_util.convert_variables_to_constants(
						sess, #The session is used to retrieve the weights
						input_graph_def, #The graph_def is used to retrieve the nodes
						output_node_names.split(",") #The output node names are used to select the usefull nodes
					)

					# finally we serialize and dump the output graph to the filesystem
					output_pb_file = FLAGS.model_dir+os.sep+FLAGS.model_type+'_'+FLAGS.class_type+'.pb'
					with tf.gfile.GFile(output_pb_file, "wb") as f:
						f.write(output_graph_def.SerializeToString())
					print("%d ops in the final graph." % len(output_graph_def.node) )
Example #32
0
def guess(model_dir='./21936', class_type='genda', model_type='inception',
          filename='', device_id='/cpu:0', requested_step='',
          target='future_genda_prediction', checkpoint='14999',
          face_detection_model='', face_detection_type='cascade', count=''):

    files = []
    
    if face_detection_model:
        print('Using face detector (%s) %s' % (face_detection_type, face_detection_model))
        # NOTE: the face_detection_model argument shadows the face_detection_model()
        # factory used in the other examples, so this branch only works if a
        # callable is passed in rather than a model path string.
        face_detect = face_detection_model(face_detection_type, face_detection_model)
        face_files, rectangles = face_detect.run(filename)
        print(face_files)
        files += face_files

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:

        if class_type == 'genda':
            label_list = GENDER_LIST
        elif class_type == 'age':
            label_list = AGE_LIST
        nlabels = len(label_list)

        model_fn = select_model(model_type)

        with tf.device(device_id):
            
            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False,count)
            init = tf.global_variables_initializer()
            
            requested_step = requested_step if requested_step else None
        
            checkpoint_path = '%s' % (model_dir)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, checkpoint)
            # Method for loading the TF model
            if count == 0:
                startT=time.time()
                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)
                endT = time.time()
                loadms = (endT - startT) * 1000
                print(loadms)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()

            # Support a batch mode if no face detection model
            if len(files) == 0:
                if (os.path.isdir(filename)):
                    for relpath in os.listdir(filename):
                        abspath = os.path.join(filename, relpath)
                        
                        if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
                            print(abspath)
                            files.append(abspath)
                else:
                    files.append(filename)
                    # If it happens to be a list file, read the list and clobber the files
                    if any([filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
                        files = list_images(filename)
                
            writer = None
            output = None
            if target:
                print('Creating output file %s' % target)
                output = open(target, 'w')
                writer = csv.writer(output)
                writer.writerow(('file', 'label', 'score'))
            image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
            print(image_files)
            for image_file in image_files:  # the multiple results from files still need to be aggregated
                classify_one_multi_crop(sess, init, label_list, softmax_output, coder, images, image_file, writer)

            if output is not None:
                output.close()
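
For reference, a hedged example of invoking guess as defined above; the image path is an assumption, and count=0 triggers the timed checkpoint restore:

# Hypothetical call; './21936' must hold a gender checkpoint at step 14999.
guess(filename='demo_face.jpg', count=0)
# Predictions for every resolved image are written to the CSV named by
# target ('future_genda_prediction' by default).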