Example #1
    def __init__(self, model_dir, class_type):
        self.graph = tf.Graph()
        # config = tf.ConfigProto(allow_soft_placement=True)
        # self.sess = tf.Session(config=config, graph=self.graph)
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            # import saved model from loc into local graph
            label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
            nlabels = len(label_list)
            if class_type == "age":
                model_fn = select_model(FLAGS.age_model_type)
            else:
                model_fn = select_model(FLAGS.sex_model_type)

            self.images = tf.placeholder(tf.float32,
                                         [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, self.images, 1, False)
            # init = tf.global_variables_initializer()
            saver = tf.train.Saver()
            requested_step = FLAGS.requested_step if FLAGS.requested_step else None
            checkpoint_path = '%s' % (model_dir)
            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)
            saver.restore(self.sess, model_checkpoint_path)
            self.softmax_output = tf.nn.softmax(logits)
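A minimal usage sketch, assuming this constructor belongs to a class named Guesser and that the checkpoint directories exist locally (both names are assumptions, not from the snippet); because each instance owns a private tf.Graph and tf.Session, an age model and a gender model can coexist in one process:

# Hypothetical usage of the wrapper above.
age_guesser = Guesser('./checkpoints/age', 'age')
gender_guesser = Guesser('./checkpoints/gender', 'gender')

# batch: float32 array shaped [N, RESIZE_FINAL, RESIZE_FINAL, 3], preprocessed elsewhere
age_probs = age_guesser.sess.run(age_guesser.softmax_output,
                                 feed_dict={age_guesser.images: batch})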
Example #2
def evaluate(run_dir):
    with tf.Graph().as_default() as g:
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        eval_data = FLAGS.eval_data == 'valid'
        num_eval = md['%s_counts' % FLAGS.eval_data]

        model_fn = select_model(FLAGS.model_type)


        with tf.device(FLAGS.device_id):
            print('Executing on %s' % FLAGS.device_id)
            images, labels, _ = inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, train=not eval_data, num_preprocess_threads=FLAGS.num_preprocess_threads)
            logits = model_fn(md['nlabels'], images, 1, False)
            summary_op = tf.summary.merge_all()
            
            summary_writer = tf.summary.FileWriter(run_dir, g)
            saver = tf.train.Saver()
            
            if FLAGS.requested_step_seq:
                sequence = FLAGS.requested_step_seq.split(',')
                for requested_step in sequence:
                    print('Running %s' % requested_step)
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step)
            else:
                while True:
                    print('Running loop')
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval)
                    if FLAGS.run_once:
                        break
                    time.sleep(FLAGS.eval_interval_secs)
Example #3
def evaluate(run_dir):
    with tf.Graph().as_default() as g:
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        #print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        eval_data = FLAGS.eval_data == 'valid'
        num_eval = md['%s_counts' % FLAGS.eval_data]

        model_fn = select_model(FLAGS.model_type)


        with tf.device(FLAGS.device_id):
            #print('Executing on %s' % FLAGS.device_id)
            images, labels, _ = inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, train=not eval_data, num_preprocess_threads=FLAGS.num_preprocess_threads)
            logits = model_fn(md['nlabels'], images, 1, False)
            summary_op = tf.summary.merge_all()
            
            summary_writer = tf.summary.FileWriter(run_dir, g)
            saver = tf.train.Saver()
            
            if FLAGS.requested_step_seq:
                sequence = FLAGS.requested_step_seq.split(',')
                for requested_step in sequence:
                    #print('Running %s' % sequence)
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval, requested_step)
            else:
                while True:
                    #print('Running loop')
                    eval_once(saver, summary_writer, summary_op, logits, labels, num_eval)
                    if FLAGS.run_once:
                        break
                    time.sleep(FLAGS.eval_interval_secs)
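For reference, the FLAGS consumed by these evaluate() snippets come from TF1's tf.app.flags; a minimal sketch of the relevant definitions (flag names are taken from the snippets, the default values are assumptions):

import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', './train', 'Directory holding md.json and checkpoints')
tf.app.flags.DEFINE_string('eval_data', 'valid', 'Which split to evaluate')
tf.app.flags.DEFINE_string('model_type', 'inception', 'Architecture passed to select_model')
tf.app.flags.DEFINE_string('device_id', '/cpu:0', 'Device to execute on')
tf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')
tf.app.flags.DEFINE_integer('image_size', 227, 'Input image size')
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4, 'Preprocessing threads')
tf.app.flags.DEFINE_boolean('run_once', False, 'Evaluate once instead of looping')
tf.app.flags.DEFINE_integer('eval_interval_secs', 300, 'Seconds between evaluation runs')
tf.app.flags.DEFINE_string('requested_step_seq', '', 'Comma-separated checkpoint steps to evaluate')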
Example #4
    def __init__(
            self,
            model_dir='/usr/src/app/deps/rude-carnie/inception_gender_checkpoint',
            model_type='inception',
            class_type='gender'):
        '''
        Just a wrapper around guess.py.
        '''
        self.model_dir = model_dir
        self.model_type = model_type
        self.class_type = class_type
        self.sess = tf.Session()
        model_fn = select_model(self.model_type)
        self.label_list = AGE_LIST if self.class_type == 'age' else GENDER_LIST
        nlabels = len(self.label_list)
        self.images = tf.placeholder(tf.string, [None])
        standardize = tf.map_fn(self.decode, self.images, dtype=tf.float32)
        logits = model_fn(nlabels, standardize, 1, False)
        init = tf.global_variables_initializer()

        requested_step = None  #FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (self.model_dir)
        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, None)
        #FLAGS.checkpoint)

        saver = tf.train.Saver()
        saver.restore(self.sess, model_checkpoint_path)

        self.softmax_output = tf.nn.softmax(logits)

        self.coder = ImageCoder()
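The decode callback mapped over the string placeholder is not shown; a plausible sketch (an assumption, not the original implementation) that turns one encoded JPEG string into standardized RESIZE_FINAL x RESIZE_FINAL floats, matching the dtype=tf.float32 declared in the map_fn call:

    def decode(self, image_bytes):
        # Decode a single encoded image string, resize, and standardize it.
        image = tf.image.decode_jpeg(image_bytes, channels=3)
        image = tf.image.resize_images(image, (RESIZE_FINAL, RESIZE_FINAL))
        return tf.image.per_image_standardization(image)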
Example #5
def guess_loop(class_type, model_type, model_dir, q, guess_q):
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        model_fn = select_model(model_type)

        with tf.device('/cpu:0'):
            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)

            init = tf.global_variables_initializer()

            checkpoint_path = '%s' % (model_dir)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, None, 'checkpoint')

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)


            while True:
                image_files = q.get()
                guess_result = []

                for image in image_files:
                    guess = classify_one_multi_crop(sess, label_list, softmax_output, images, image)
                    if guess is not None:
                        guess_result.append(guess)

                if not guess_q.full():
                    guess_q.put(guess_result)
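A sketch of how this worker might be wired up with the standard multiprocessing module (queue sizes and the checkpoint path are assumptions):

from multiprocessing import Process, Queue

q, guess_q = Queue(maxsize=4), Queue(maxsize=4)
worker = Process(target=guess_loop,
                 args=('gender', 'inception', './checkpoints/gender', q, guess_q))
worker.daemon = True   # let the worker die with the main process
worker.start()

q.put(['face_0.jpg', 'face_1.jpg'])   # hand a batch of cropped faces to the worker
print(guess_q.get())                  # collect the list of guesses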
Example #6
def model_prediction_view():

    display_image('undraw_viral_tweet_gndb', '')
    st.info("Classification with Machine Learning Models")
    st.markdown("""
        ### Use some of the built-in models to find out the sentiment of your tweet.
        #### **Instructions:**
        1. Simply pick a model you would like used to classify your tweet from the dropdown menu
        2. Type in your tweet on the text area
        3. Click the 'Classify' button and see your results below.
    """)

    # Creating sidebar with selection box -
    # you can create multiple pages this way
    models = ("Logistic_regression", "Linear SVC", "SVM")
    st.subheader('Pick a model to classify your text:')
    chosen = st.selectbox('', models)

    if chosen in models:
        select_model(chosen)
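Note that select_model here is application code for a Streamlit sentiment app, not the TF helper used elsewhere on this page. A purely hypothetical sketch of what it might do (the pickled model files and their naming scheme are assumptions):

import joblib
import streamlit as st

def select_model(model_name):
    # Hypothetical: load a pickled sklearn pipeline matching the dropdown choice.
    model = joblib.load('models/%s.pkl' % model_name.replace(' ', '_'))
    tweet = st.text_area('Enter your tweet:')
    if st.button('Classify'):
        st.success('Sentiment: %s' % model.predict([tweet])[0])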
Example #7
def guessGender(path):  # pylint: disable=unused-argument

    with tf.Session() as sess:

        # tf.reset_default_graph()
        label_list = GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()
            files = deal_file.get_files(path)
            font = cv2.FONT_HERSHEY_SIMPLEX

            try:
                for f in files:
                    best_choice = classify(sess, label_list, softmax_output,
                                           coder, images, f)
                    # print(best_choice)
                    pic = cv2.imread(f)
                    fname = f[:-4] + '_test' + f[-4:]
                    cv2.putText(pic, best_choice[0], (5, 40), font, 1,
                                (100, 255, 50), 2, cv2.LINE_AA)
                    cv2.imwrite(fname, pic)
                    print(best_choice)

            except Exception as e:
                print(e)
                print('Failed to run image %s ' % f)
Example #8
def run_inference_on_image(image_data):
    config = tf.ConfigProto(allow_soft_placement=True)
    tf.logging.set_verbosity(tf.logging.WARN)
    with tf.Session(config=config) as sess:

        softmax_output = sess.graph.get_tensor_by_name('labels_softmax:0')
        images = sess.graph.get_tensor_by_name('Placeholder:0')
        label_list = AGE_LIST
        nlabels = len(label_list)
        model_fn = select_model('inception_v3')

        #print('Executing on %s' % FLAGS.device_id)
        image_bytes = download_image(image_data)
        coder = ImageCoder()
        return classify_many_single_crop(sess, label_list, softmax_output,
                                         coder, images, image_bytes)
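This function looks tensors up by name ('labels_softmax:0', 'Placeholder:0'), so a frozen GraphDef must already have been imported into the default graph before the session is created; a minimal sketch of that step (the .pb path is an assumption):

with tf.gfile.GFile('age_model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')  # tensors keep their original names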
Example #9
    def extract(self):
        with tf.Session(config=self.config) as sess:
            nlabels = len(self.label_list)
            model_fn = select_model(FLAGS.model_type)
            with tf.device(FLAGS.device_id):
                images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                logits = model_fn(nlabels, images, 1, False)
                init = tf.global_variables_initializer()
                requested_step = FLAGS.requested_step if FLAGS.requested_step else None
                checkpoint_path = '%s' % (FLAGS.model_dir)
                model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)
                softmax_output = tf.nn.softmax(logits)
                coder = ImageCoder()
                files = []
                # Support a batch mode if no face detection model
                if len(files) == 0:
                    if (os.path.isdir(FLAGS.filename)):
                        for relpath in os.listdir(FLAGS.filename):
                            abspath = os.path.join(FLAGS.filename, relpath)
                            
                            if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
                                print(abspath)
                                files.append(abspath)
                    else:
                        files.append(FLAGS.filename)
                        # If it happens to be a list file, read the list and clobber the files
                        if any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
                            files = list_images(FLAGS.filename)
                writer = None
                output = None
                if FLAGS.target:
                    print('Creating output file %s' % FLAGS.target)
                    output = open(FLAGS.target, 'w')
                    writer = csv.writer(output)
                    writer.writerow(('file', 'label', 'score'))
                image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
                print(image_files)
                if FLAGS.single_look:
                    classify_many_single_crop(sess, self.label_list, softmax_output, coder, images, image_files, writer)

                else:
                    for image_file in image_files:
                        classify_one_multi_crop(sess, self.label_list, softmax_output, coder, images, image_file, writer)

                if output is not None:
                    output.close()
Example #10
def guessGender(path):  # pylint: disable=unused-argument
    # Detect the gender of every photo in the folder

    with tf.Session() as sess:

        # tf.reset_default_graph()
        label_list = GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()
            files = get_files(path)
            gender_dict = {}

            try:
                for f in files:
                    best_choice = classify(sess, label_list, softmax_output, coder, images, f)
                    # print(best_choice)
                    gender_dict[f[len(path) + 1:]] = best_choice
                return gender_dict

            except Exception as e:
                print(e)
                print('Failed to run image %s ' % f)
Example #11
def guessAge(image_file):

    # important!!! Fixes the bug described at http://stackoverflow.com/questions/33765336/remove-nodes-from-graph-or-reset-entire-default-graph
    tf.reset_default_graph()
    with tf.Session() as sess:

        age_label_list = AGE_LIST
        agelabels = len(age_label_list)

        # print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model('inception')

        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits_age = model_fn(agelabels, images, 1, False)
        init = tf.global_variables_initializer()

        requested_step = FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (AGE_MODEL_PATH)
        # update in 0.11 version

        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)
        # print('model_checkpoint_path is', model_checkpoint_path)
        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits_age)

        coder = ImageCoder()

        # detect age
        best_choice = classify(sess, age_label_list, softmax_output, coder, images,
                               image_file)

        return best_choice
Example #12
def main(argv=None):  # pylint: disable=unused-argument

    with tf.Session() as sess:

        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits = model_fn(nlabels, images, 1, False)
        init = tf.global_variables_initializer()

        requested_step = FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (FLAGS.model_dir)

        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)

        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits)

        coder = ImageCoder()

        files = []

        if FLAGS.face_detection_model:
            print('Using face detector %s' % FLAGS.face_detection_model)
            face_detect = FaceDetector(FLAGS.face_detection_model)
            face_files, rectangles = face_detect.run(FLAGS.filename)
            files += face_files

        if len(files) == 0:
            files.append(FLAGS.filename)

        for f in files:
            classify(sess, label_list, softmax_output, coder, images, f)
Example #13
def guessGender(image_file):
    tf.reset_default_graph()
    with tf.Session() as sess:

        #sess = tf.Session()
        age_label_list = AGE_LIST
        gender_label_list = GENDER_LIST
        genderlabels = len(gender_label_list)

        # print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model('inception')

        with tf.device(FLAGS.device_id):

            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits_gender = model_fn(genderlabels, images, 1, False)
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)

            softmax_output = tf.nn.softmax(logits_gender)

            coder = ImageCoder()

            files = []

            # detect gender
            #try:
            best_choice = classifyGender(sess, gender_label_list,
                                         softmax_output, coder, images,
                                         image_file)
            return best_choice
Example #14
def guessGender(image_file):
    with tf.Session() as sess:

        age_label_list = AGE_LIST
        gender_label_list = GENDER_LIST
        genderlabels = len(gender_label_list)

        # print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model('inception')

        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits_gender = model_fn(genderlabels, images, 1, False)
        init = tf.global_variables_initializer()

        requested_step = FLAGS.requested_step if FLAGS.requested_step else None

        checkpoint_path = '%s' % (FLAGS.model_dir)

        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, FLAGS.checkpoint)

        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits_gender)

        coder = ImageCoder()

        files = []

        # detect age
        try:
            best_choice = classify(sess, gender_label_list, softmax_output,
                                   coder, images, image_file)
            return best_choice
        except Exception as e:
            print(e)
            print('Failed to run image %s ' % image_file)
Example #15
def model_init(sess, model_path, label_list):

    model_checkpoint_path, global_step = get_checkpoint(
        model_path, None, FLAGS.checkpoint)

    nlabels = len(label_list)
    model_fn = select_model(FLAGS.model_type)
    images_placeholder = tf.placeholder(tf.float32,
                                        [None, RESIZE_FINAL, RESIZE_FINAL, 3])

    with tf.device(FLAGS.device_id):

        logits = model_fn(nlabels, images_placeholder, 1, False)

        init = tf.global_variables_initializer()

        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        softmax_output = tf.nn.softmax(logits)

        return softmax_output, images_placeholder
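A short usage sketch (the checkpoint path and the face_batch array are assumptions); the caller owns the session and passes it in:

sess = tf.Session()
softmax_output, images_placeholder = model_init(sess, './checkpoints/gender', GENDER_LIST)
# face_batch: float32 array shaped [N, RESIZE_FINAL, RESIZE_FINAL, 3]
probs = sess.run(softmax_output, feed_dict={images_placeholder: face_batch})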
Example #16
    def __init__(self, session_conf, class_type, model_dir, requested_step='',
                 model_type='inception', device_id='/cpu:0', checkpoint='checkpoint'):
        self.graph = tf.Graph()  # create a separate graph for each instance
        self.session_conf = session_conf
        self.sess = tf.Session(graph=self.graph, config=self.session_conf)  # create a new session
        self.label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
        self.model_dir = model_dir
        self.requested_step = requested_step
        with self.sess.as_default():
            with self.graph.as_default():
                nlabels = len(self.label_list)
                print('Executing on %s' % device_id)
                model_fn = select_model(model_type)
                with tf.device(device_id):
                    self.images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                    logits = model_fn(nlabels, self.images, 1, False)
                    init = tf.global_variables_initializer()
                    requested_step = self.requested_step if self.requested_step else None
                    checkpoint_path = '%s' % (self.model_dir)
                    # gender_model_dir
                    model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, checkpoint)
                    self.saver = tf.train.Saver()
                    self.saver.restore(self.sess, model_checkpoint_path)  # restore parameters from the checkpoint
                    self.softmax_output = tf.nn.softmax(logits)
                    self.coder = ImageCoder()
Example #17
def main(argv=None):
    with tf.Graph().as_default():

        model_fn = select_model(FLAGS.model_type)
        # Open the metadata file and figure out nlabels, and size of epoch
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        images, labels, _ = distorted_inputs(FLAGS.train_dir, FLAGS.batch_size,
                                             FLAGS.image_size,
                                             FLAGS.num_preprocess_threads)
        logits = model_fn(md['nlabels'], images, 1 - FLAGS.pdrop, True)
        total_loss = loss(logits, labels)
        ini_global_step = 0

        train_op = optimizer(FLAGS.optim, FLAGS.eta, total_loss,
                             FLAGS.steps_per_decay, FLAGS.eta_decay_rate)
        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))

        tf.global_variables_initializer().run(session=sess)

        # This is total hackland, it only works to fine-tune iv3
        if FLAGS.pre_model:
            inception_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
                                                    scope="InceptionV3")
            restorer = tf.train.Saver(inception_variables)
            restorer.restore(sess, FLAGS.pre_model)

        if FLAGS.pre_checkpoint_path:
            if tf.gfile.Exists(FLAGS.pre_checkpoint_path) is True:
                print('Trying to restore checkpoint from %s' %
                      FLAGS.pre_checkpoint_path)
                restorer = tf.train.Saver()
                restorer.restore(
                    sess,
                    tf.train.latest_checkpoint(FLAGS.pre_checkpoint_path))
                print('%s: Pre-trained model restored from %s' %
                      (datetime.now(), FLAGS.pre_checkpoint_path))
                ini_global_step = get_restored_step(FLAGS.pre_checkpoint_path)
                print('Initial Global Step is {}'.format(ini_global_step))

        run_dir = '%s/run-%d' % (FLAGS.train_dir, os.getpid())

        checkpoint_path = '%s/%s' % (run_dir, FLAGS.checkpoint)
        if tf.gfile.Exists(run_dir) is False:
            print('Creating %s' % run_dir)
            tf.gfile.MakeDirs(run_dir)

        tf.train.write_graph(sess.graph_def, run_dir, 'model.pb', as_text=True)

        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(run_dir, sess.graph)
        steps_per_train_epoch = int(md['train_counts'] / FLAGS.batch_size)
        num_steps = FLAGS.max_steps if FLAGS.epochs < 1 else FLAGS.epochs * steps_per_train_epoch
        print('Requested number of steps [%d]' % num_steps)

        for step in xrange(num_steps):
            step += ini_global_step
            start_time = time.time()
            _, loss_value = sess.run([train_op, total_loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = (
                    '%s: step %d, loss = %.3f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            # Loss only actually evaluated every 100 steps?
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            if step % 1000 == 0 or (step + 1) == num_steps:
                saver.save(sess, checkpoint_path, global_step=step)
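For a sense of the schedule this produces: with md['train_counts'] = 50000 and batch_size = 128 (values assumed purely for illustration), steps_per_train_epoch is int(50000 / 128) = 390, so epochs = 3 requests 1170 steps, and (with ini_global_step = 0) checkpoints are written at steps 0, 1000, and 1169.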
Example #18
def detectUndetectedPersons(outfile, undetected_persons):
    #RUDE CARNIE DEFAULTS

    print("starting the process to detect people's age and gender...")

    gender_model_dir = "./age_and_gender_detection/pretrained_checkpoints/gender/"
    age_model_dir = "./age_and_gender_detection/pretrained_checkpoints/age/"
    # What processing unit to execute inference on
    device_id = '/device:GPU:0'
    # Checkpoint basename
    checkpoint = 'checkpoint'
    model_type = 'inception'

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    config = tf.ConfigProto(allow_soft_placement=True)
    # gpu_options=gpu_options)
    # config = tf.ConfigProto(allow_soft_placement=True)

    with tf.Session(config=config) as sess:
        #Age detection model
        n_ages = len(AGE_LIST)
        age_model_fn = select_model(model_type)

        #Gender detection model
        n_genders = len(GENDER_LIST)
        gender_model_fn = select_model(model_type)

        print("initializing the model to detect age and gender")

        with tf.device(device_id):
            print "initializing the model to detect age and gender using ", str(
                device_id)
            images = tf.placeholder(tf.float32,
                                    [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            requested_step = None
            init = tf.global_variables_initializer()

            #age model
            age_logits = age_model_fn("age", n_ages, images, 1, False)
            age_checkpoint_path, global_step = get_checkpoint(
                age_model_dir, requested_step, checkpoint)
            age_vars = set(tf.global_variables())
            saver_age = tf.train.Saver(list(age_vars))
            saver_age.restore(sess, age_checkpoint_path)
            age_softmax_output = tf.nn.softmax(age_logits)

            #gender_model
            gender_logits = gender_model_fn("gender", n_genders, images, 1,
                                            False)
            gender_checkpoint_path, global_step = get_checkpoint(
                gender_model_dir, requested_step, checkpoint)
            gender_vars = set(tf.global_variables()) - age_vars
            saver_gender = tf.train.Saver(list(gender_vars))
            saver_gender.restore(sess, gender_checkpoint_path)
            gender_softmax_output = tf.nn.softmax(gender_logits)

            coder = ImageCoder()

            writer = None

            print(
                "starting the loop for detecting age and gender in each frame")
            time.sleep(
                15)  # sleep to allow the tensor flow/rude carnie stuff to load
            for person_name, person_img in undetected_persons:
                outfile.write("%s%s%s"%(person_name, getAgeAndGender(person_name, person_img, sess, coder, images,\
                        writer, AGE_LIST, GENDER_LIST, age_softmax_output,\
                        gender_softmax_output), '\n'))
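The restore trick above (snapshotting tf.global_variables() between model builds and taking a set difference so each Saver only touches its own model's variables) generalizes; a minimal runnable sketch, with tiny stand-in variables instead of the real networks:

import tensorflow as tf

with tf.Graph().as_default():
    with tf.variable_scope('model_a'):        # stands in for the age model
        tf.get_variable('w', shape=[2])
    vars_a = set(tf.global_variables())
    with tf.variable_scope('model_b'):        # stands in for the gender model
        tf.get_variable('w', shape=[2])
    vars_b = set(tf.global_variables()) - vars_a

    saver_a = tf.train.Saver(list(vars_a))    # restores only model_a/* variables
    saver_b = tf.train.Saver(list(vars_b))    # restores only model_b/* variables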
Example #19
def test(config):
    with open(config.word_emb_file, "r") as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, "r") as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.dev_eval_file, "r") as fh:
        eval_file = json.load(fh)
    with open(config.dev_example, "r") as fh:
        example_file = json.load(fh)
    # # with open(config.test_eval_file, "r") as fh:
    # #     eval_file = json.load(fh)
    #
    # # dp_test = DataProcessor('test', config)
    # # total = dp_test.get_data_size()
    dp_test = DataProcessor('test', config, is_test=True)
    total = dp_test.get_data_size()
    print("Loading model...")

    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True

    with tf.Session(config=sess_config) as sess:
        selector = select_model(sess, config, word_mat, char_mat)
        spaner = span_model(sess, config, word_mat, char_mat)
        sess.run(tf.global_variables_initializer())
        variables = tf.contrib.framework.get_variables_to_restore()
        variables_to_restore = variables
        #variables_to_restore = [v for v in variables if v.name.split('/')[0] != 'output']

        saver = tf.train.Saver()
        # saver = tf.train.Saver()

        saver.restore(sess,
                      tf.train.latest_checkpoint(config.span_save_dir_trained))
        '''
        from tensorflow.python import pywrap_tensorflow
        checkpoint_path = tf.train.latest_checkpoint(config.span_save_dir_trained)
        reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
        var_to_shape_map = reader.get_variable_to_shape_map()

        reader = tf.train.NewCheckpointReader(tf.train.latest_checkpoint(config.span_save_dir_trained))

        variables = reader.get_variable_to_shape_map()
        saver.restore(sess, tf.train.latest_checkpoint(config.span_save_dir_trained))
        '''
        sess.run(
            tf.assign(selector.is_train, tf.constant(False, dtype=tf.bool)))
        sess.run(tf.assign(spaner.is_train, tf.constant(False, dtype=tf.bool)))
        losses = []
        answer_dict = {}
        remapped_dict = {}
        for step in tqdm(range(total // config.batch_size)):

            tensor_dict, _ = dp_test.get_train_batch(step, is_test=True)

            qa_id, yp, outer, para_encode, para_enc_mask, q_emb, sen_emb, lo = sess.run(
                [
                    selector.qa_id, selector.yp, selector.outer,
                    selector.att_s, selector.c_p_mask, selector.q_emb,
                    selector.c_emb, selector.lo
                ],
                feed_dict={
                    selector.qa_id: tensor_dict['ids'],
                    selector.q: tensor_dict['ques_idxs'],
                    selector.cs: tensor_dict['context_s_idxs'],
                    selector.y: tensor_dict['y'],
                    selector.ce: tensor_dict['context_s_exist_tag'],
                    selector.ct: tensor_dict['context_type_tag'],
                    selector.qt: tensor_dict['ques_type_tag']
                })
            select_sentence = []
            sentences_len = []
            q = []
            for i in range(config.batch_size):
                ques = np.zeros([config.ques_limit, q_emb.shape[-1]],
                                np.float32)
                ques[:q_emb.shape[-2]] = q_emb[i]
                q.append(ques)
                sentences = []
                sentence_len = []
                sum = tensor_dict['sentence_num'][i]
                indexs = np.argsort(-outer[i])
                for j in range(config.k):
                    top_index = indexs[j]
                    sentence = np.zeros([config.sen_len, sen_emb.shape[-1]],
                                        np.float32)
                    sentence[:sen_emb.shape[-2]] = sen_emb[i][top_index]
                    sentences.append(sentence)
                    sentence_length = np.arange(
                        sum[indexs[j] - 1],
                        sum[indexs[j] - 1] + config.sen_len, 1)
                    sentence_len.append(sentence_length)
                sentence_len = np.array(sentence_len)
                sentences = np.array(sentences)
                select_sentence.append(sentences)
                sentences_len.append(sentence_len)
            select_sentences = np.array(select_sentence)
            sentences_lens = np.array(sentences_len)
            q = np.array(q)
            qa_id, loss, train_op_span, yp1, yp2 = sess.run(
                [
                    spaner.qa_id, spaner.loss_span, spaner.train_op,
                    spaner.yp1, spaner.yp2
                ],
                feed_dict={
                    spaner.qa_id: tensor_dict['ids'],
                    spaner.para: tensor_dict['para_idxs'],
                    spaner.para_e: tensor_dict['para_exist_tag'],
                    spaner.para_t: tensor_dict['para_type_tag'],
                    spaner.q: q,
                    spaner.sentence: select_sentences,
                    spaner.sentence_index: sentences_lens,
                    spaner.outer: outer,
                    spaner.para_enc: para_encode,
                    spaner.para_enc_mask: para_enc_mask,
                    spaner.y1: tensor_dict['y1'],
                    spaner.y2: tensor_dict['y2']
                })

            answer_dict_, remapped_dict_ = convert_tokens(
                eval_file, qa_id.tolist(), yp1.tolist(), yp2.tolist())
            answer_dict.update(answer_dict_)
            remapped_dict.update(remapped_dict_)
            losses.append(loss)
        loss = np.mean(losses)
        metrics = evaluate(eval_file, answer_dict)
        with open(config.answer_file, "w") as fh:
            json.dump(remapped_dict, fh)
        print("Exact Match: {}, F1: {}".format(metrics['exact_match'],
                                               metrics['f1']))
Example #20
def main(argv=None):

	files = []

	#if FLAGS.facedetect_model_type is 'MTCNN':
	#	print('Using face detector %s' %(FLAGS.facedetect_model_type))
	#	img = cv2.imread(FLAGS.filename)
	#	img_resize = cv2.resize(img, (640,480))
	#	img_resize = cv2.cvtColor(img_resize, cv2.COLOR_BGR2RGB)
	#	faceimagesequence, faceimagelocation, faceimagelandmarks, numberoffaces = MTCNNDetectFace(image)

	# Load Model
	if FLAGS.multitask:
		config = tf.ConfigProto(allow_soft_placement=True)
		with tf.Session(config=config) as sess:
			
			age_nlabels = len(AGE_LIST)
			gender_nlabels = len(GENDER_LIST)

			print('Executing on %s ' % FLAGS.device_id)
			model_fn = select_model(FLAGS.model_type)

			with tf.device(FLAGS.device_id):
				images = tf.placeholder(tf.float32, [None,RESIZE_FINAL,RESIZE_FINAL, 3], name='input')
				agelogits, genderlogits = model_fn(age_nlabels,images, gender_nlabels, images, 1, False)
				init = tf.global_variables_initializer()
				requested_step = FLAGS.requested_step if FLAGS.requested_step else None
				checkpoint_path = '%s' % (FLAGS.model_dir)
				model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
				saver = tf.train.Saver()
				saver.restore(sess, model_checkpoint_path)
				softmax_age_output = tf.nn.softmax(agelogits, name='ageoutput')
				softmax_gender_output = tf.nn.softmax(genderlogits, name='genderoutput')
				print(softmax_age_output)
				print(softmax_gender_output)

				coder = ImageCoder()
				#support a batch mode if no face detection model
				if len(files) == 0:
					if (os.path.isdir(FLAGS.filename)):
						for relpath in os.listdir(FLAGS.filename):
							abspath = os.path.join(FLAGS.filename, relpath)

							if os.path.isfile(abspath) and any([abspath.endswith('.'+ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
								print(abspath)
								files.append(abspath)

					else:
						files.append(FLAGS.filename)
						# if it happens to be a list file, read the list and clobber the files
						if any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
							files = list_images(FLAGS.filename)

				writer = None
				output = None
				if FLAGS.resultfile:
					print('Creating output file %s ' % FLAGS.resultfile)
					output = open(FLAGS.resultfile, 'w')
					writer = csv.writer(output)
					writer.writerow(('file', 'label', 'score'))

				image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
				print(image_files)

				if FLAGS.single_look:

					classify_many_single_crop(sess, AGE_LIST, softmax_age_output,
						coder, images, image_files, writer)
					classify_many_single_crop(sess, GENDER_LIST, softmax_gender_output,
						coder, images, image_files, writer)
					
				else:

					for image_file in image_files:
						classify_one_multi_crop(sess, AGE_LIST, softmax_age_output,
							coder, images, image_file, writer)
						classify_one_multi_crop(sess, GENDER_LIST, softmax_gender_output,
							coder, images, image_file, writer)


				if output is not None:
					output.close()


				if FLAGS.convertpb:
					# retrieve the protobuf graph definition
					graph = tf.get_default_graph()
					input_graph_def = graph.as_graph_def()
					output_node_names = 'ageoutput_1,genderoutput_1'
					output_graph_def = graph_util.convert_variables_to_constants(
						sess, #The session is used to retrieve the weights
						input_graph_def, #The graph_def is used to retrieve the nodes
						output_node_names.split(",") #The output node names are used to select the useful nodes
					)

					# finally we serialize and dump the output graph to the filesystem
					output_pb_file = FLAGS.model_dir+os.sep+FLAGS.model_type+'.pb'
					with tf.gfile.GFile(output_pb_file, "wb") as f:
						f.write(output_graph_def.SerializeToString())
					print("%d ops in the final graph." % len(output_graph_def.node) )

	else:

		config = tf.ConfigProto(allow_soft_placement=True)
		with tf.Session(config=config) as sess:
			
			if FLAGS.class_type == 'Age':
				label_list = AGE_LIST
			elif FLAGS.class_type == 'Gender':
				label_list = GENDER_LIST			
			nlabels = len(label_list)

			print('Executing on %s' % FLAGS.device_id)
			model_fn = select_model(FLAGS.model_type)

			with tf.device(FLAGS.device_id):
				images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3],name='input')
				logits = model_fn(nlabels, images, 1, False)
				init = tf.global_variables_initializer()

				requested_step = FLAGS.requested_step if FLAGS.requested_step else None

				checkpoint_path = '%s' % (FLAGS.model_dir)

				model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)

				saver = tf.train.Saver()
				saver.restore(sess, model_checkpoint_path)

				if FLAGS.class_type == 'Age':
					softmax_output = tf.nn.softmax(logits, name='ageoutput')
				elif FLAGS.class_type == 'Gender':
					softmax_output = tf.nn.softmax(logits, name='genderoutput')

				coder = ImageCoder()

				# Support a batch mode if no face detection model
				if len(files) == 0:

					if (os.path.isdir(FLAGS.filename)):
						for relpath in os.listdir(FLAGS.filename):
							abspath = os.path.join(FLAGS.filename, relpath)

							if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
								print(abspath)
								files.append(abspath)
					else:
						files.append(FLAGS.filename)
						# If it happens to be a list file, read the list and clobber the files
						if any([FLAGS.filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
							files = list_images(FLAGS.filename)

				writer = None
				output = None
				if FLAGS.resultfile:
					print('Creating output file %s ' % FLAGS.resultfile)
					output = open(FLAGS.resultfile, 'w')
					writer = csv.writer(output)
					writer.writerow(('file', 'label', 'score'))

				image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
				print(image_files)

				if FLAGS.single_look:
					classify_many_single_crop(sess, label_list, softmax_output, coder, images, image_files, writer)
				else:
					for image_file in image_files:
						classify_one_multi_crop(sess, label_list, softmax_output, coder, images, image_file, writer)

				if output is not None:
					output.close()

				if FLAGS.convertpb:
					# retrieve the protobuf graph definition
					graph = tf.get_default_graph()
					input_graph_def = graph.as_graph_def()
					if FLAGS.class_type == 'Age':
						output_node_names = 'ageoutput'
					elif FLAGS.class_type == 'Gender':
						output_node_names = 'genderoutput'

					output_graph_def = graph_util.convert_variables_to_constants(
						sess, #The session is used to retrieve the weights
						input_graph_def, #The graph_def is used to retrieve the nodes
						output_node_names.split(",") #The output node names are used to select the useful nodes
					)

					# finally we serialize and dump the output graph to the filesystem
					output_pb_file = FLAGS.model_dir+os.sep+FLAGS.model_type+'_'+FLAGS.class_type+'.pb'
					with tf.gfile.GFile(output_pb_file, "wb") as f:
						f.write(output_graph_def.SerializeToString())
					print("%d ops in the final graph." % len(output_graph_def.node) )
Example #21
def guess(model_dir='./21936', class_type='genda', model_type='inception', filename='', device_id='/cpu:0', requested_step='', target='future_genda_prediction', checkpoint='14999', face_detection_model_path='', face_detection_type='cascade', count=''):

    files = []
    
    if face_detection_model_path:
        print('Using face detector (%s) %s' % (face_detection_type, face_detection_model_path))
        face_detect = face_detection_model(face_detection_type, face_detection_model_path)
        face_files, rectangles = face_detect.run(filename)
        print(face_files)
        files += face_files

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:

        if class_type == 'genda':
            label_list = GENDER_LIST
        elif class_type == 'age':
            label_list = AGE_LIST
        nlabels = len(label_list)

        model_fn = select_model(model_type)

        with tf.device(device_id):
            
            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False, count)
            init = tf.global_variables_initializer()
            
            requested_step = requested_step if requested_step else None
        
            checkpoint_path = '%s' % (model_dir)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, checkpoint)
            # how the TF model gets loaded
            if count == 0:
                startT = time.time()
                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)
                endT = time.time()
                loadms = (endT - startT) * 1000
                print(loadms)

            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()

            # Support a batch mode if no face detection model
            if len(files) == 0:
                if (os.path.isdir(filename)):
                    for relpath in os.listdir(filename):
                        abspath = os.path.join(filename, relpath)
                        
                        if os.path.isfile(abspath) and any([abspath.endswith('.' + ty) for ty in ('jpg', 'png', 'JPG', 'PNG', 'jpeg')]):
                            print(abspath)
                            files.append(abspath)
                else:
                    files.append(filename)
                    # If it happens to be a list file, read the list and clobber the files
                    if any([filename.endswith('.' + ty) for ty in ('csv', 'tsv', 'txt')]):
                        files = list_images(filename)
                
            writer = None
            output = None
            if target:
                print('Creating output file %s' % target)
                output = open(target, 'w')
                writer = csv.writer(output)
                writer.writerow(('file', 'label', 'score'))
            image_files = list(filter(lambda x: x is not None, [resolve_file(f) for f in files]))
            print(image_files)
            for image_file in image_files:  # the results for the multiple files still need to be aggregated
                classify_one_multi_crop(sess, init, label_list, softmax_output, coder, images, image_file, writer)

            if output is not None:
                output.close()
Example #22
def train(config):
    with open(config.word_emb_file, "r") as fh:
        word_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.char_emb_file, "r") as fh:
        char_mat = np.array(json.load(fh), dtype=np.float32)
    with open(config.train_eval_file, "r") as fh:
        train_eval_file = json.load(fh)
    with open(config.dev_eval_file, "r") as fh:
        dev_eval_file = json.load(fh)

    dp_train = DataProcessor('train', config)
    dp_dev = DataProcessor('dev', config)
    #dp_dev = dp_train
    dev_total = dp_dev.get_data_size()

    print("Building model...")

    sess_config = tf.ConfigProto(allow_soft_placement=True)
    sess_config.gpu_options.allow_growth = True

    loss_save = 100.0
    patience = 0
    lr = config.init_lr
    lr_span = config.init_lr_span
    train_batch_num = int(np.floor(
        dp_train.num_samples / config.batch_size)) - 1
    batch_no = 0
    SN = config.k
    with tf.Session(config=sess_config) as sess:
        selector = select_model(sess, config, word_mat, char_mat)
        spaner = span_model(sess, config, word_mat, char_mat)
        writer = tf.summary.FileWriter(config.RL_log_dir)
        sess.run(tf.global_variables_initializer())

        #variables = tf.contrib.framework.get_variables_to_restore()
        #variables_to_restore = variables
        # #variables_to_restore = [v for v in variables if v.name.split('/')[0] != 'output']
        # #
        #saver_selector = tf.train.Saver(variables_to_restore)
        # # saver_selector.restore(sess, tf.train.latest_checkpoint(config.save_dir))
        saver = tf.train.Saver()
        saver.restore(sess,
                      tf.train.latest_checkpoint(config.span_save_dir_trained))
        sess.run(tf.assign(selector.is_train, tf.constant(True,
                                                          dtype=tf.bool)))
        sess.run(tf.assign(spaner.is_train, tf.constant(True, dtype=tf.bool)))
        sess.run(tf.assign(selector.lr, tf.constant(lr, dtype=tf.float32)))
        sess.run(tf.assign(spaner.lr, tf.constant(lr_span, dtype=tf.float32)))
        sen_len_limit = config.sen_len
        question_type_num = 19
        for batch_time in tqdm(range(1, config.num_steps + 1)):

            tensor_dict, _ = dp_train.get_train_batch(batch_no %
                                                      train_batch_num)
            sess.run(
                tf.assign(selector.is_train, tf.constant(False,
                                                         dtype=tf.bool)))
            sess.run(
                tf.assign(spaner.is_train, tf.constant(True, dtype=tf.bool)))
            qa_id, yp, outer, para_encode, para_enc_mask, q_emb, sen_emb, lo = sess.run(
                [
                    selector.qa_id, selector.yp, selector.outer,
                    selector.att_s, selector.c_p_mask, selector.q_emb,
                    selector.c_emb, selector.lo
                ],
                feed_dict={
                    selector.qa_id: tensor_dict['ids'],
                    selector.q: tensor_dict['ques_idxs'],
                    selector.cs: tensor_dict['context_s_idxs'],
                    selector.y: tensor_dict['y'],
                    selector.ce: tensor_dict['context_s_exist_tag'],
                    selector.ct: tensor_dict['context_type_tag'],
                    selector.qt: tensor_dict['ques_type_tag']
                })
            np.savetxt("lo.txt", lo)

            select_sentence = []
            sentences_len = []
            q = []

            sentences_cs = []
            sentences_ce = []
            sentences_ct = []

            for i in range(config.batch_size):
                ques = np.zeros([config.ques_limit, q_emb.shape[-1]],
                                np.float32)
                ques[:q_emb.shape[-2]] = q_emb[i]
                q.append(ques)
                sentences = []
                sentence_len = []
                sum = tensor_dict['sentence_num'][i]
                indexs = np.argsort(-outer[i])
                #RL change
                # indexs = np.random.choice(a=outer.shape[0], size=config.k, replace=False, p=outer)

                for j in range(config.k):
                    top_index = indexs[j]
                    sentence = np.zeros([config.sen_len, sen_emb.shape[-1]],
                                        np.float32)
                    sentence[:sen_emb.shape[-2]] = sen_emb[i][top_index]
                    sentences.append(sentence)
                    sentence_length = np.arange(
                        sum[indexs[j] - 1],
                        sum[indexs[j] - 1] + config.sen_len, 1)
                    sentence_len.append(sentence_length)
                sentence_len = np.array(sentence_len)
                sentences = np.array(sentences)
                select_sentence.append(sentences)
                sentences_len.append(sentence_len)
            select_sentences = np.array(select_sentence)
            sentences_lens = np.array(sentences_len)
            q = np.array(q)

            global_step = sess.run(spaner.global_step) + 1
            qa_id, loss, train_op_span, yp1, yp2, topk, ou1, out2, \
                loss1, loss2, lo1, lo2, lo3, lo4 = sess.run(
                    [
                        spaner.qa_id, spaner.loss_span, spaner.train_op,
                        spaner.yp1, spaner.yp2, spaner.topk,
                        spaner.out_lo1, spaner.out_lo2,
                        spaner.out1, spaner.out2,
                        spaner.lo1, spaner.lo2, spaner.lo3, spaner.lo4
                    ],
                    feed_dict={
                        spaner.qa_id: tensor_dict['ids'],
                        spaner.para: tensor_dict['para_idxs'],
                        spaner.para_e: tensor_dict['para_exist_tag'],
                        spaner.para_t: tensor_dict['para_type_tag'],
                        spaner.q: q,
                        spaner.sentence: select_sentences,
                        spaner.sentence_index: sentences_lens,
                        spaner.outer: outer,
                        spaner.para_enc: para_encode,
                        spaner.para_enc_mask: para_enc_mask,
                        spaner.y1: tensor_dict['y1'],
                        spaner.y2: tensor_dict['y2']
                    })
            answer_dict, _ = convert_tokens(train_eval_file, qa_id.tolist(),
                                            yp1.tolist(), yp2.tolist())
            reward = compute_reward(train_eval_file, answer_dict)

            sess.run(
                tf.assign(selector.is_train, tf.constant(True, dtype=tf.bool)))
            sess.run(
                tf.assign(spaner.is_train, tf.constant(False, dtype=tf.bool)))
            qa_id, yp, outer, sel_loss, train_op, policy = sess.run(
                [
                    selector.qa_id, selector.yp, selector.outer, selector.loss,
                    selector.train_op, selector.policy_log_part
                ],
                feed_dict={
                    selector.qa_id: tensor_dict['ids'],
                    selector.q: tensor_dict['ques_idxs'],
                    selector.cs: tensor_dict['context_s_idxs'],
                    selector.y: tensor_dict['y'],
                    selector.ce: tensor_dict['context_s_exist_tag'],
                    selector.ct: tensor_dict['context_type_tag'],
                    selector.qt: tensor_dict['ques_type_tag'],
                    selector.reward: reward
                })
            np.savetxt("topk.txt", topk)
            #            np.savetxt("out1.txt",ou1)
            np.savetxt("l1.txt", lo1)
            np.savetxt("l2.txt", lo2)
            np.savetxt("l3.txt", lo3)
            np.savetxt("l4.txt", lo4)
            np.savetxt("lss1.txt", loss1)

            np.savetxt("lss2.txt", loss2)
            np.savetxt("reward.txt", reward)
            np.savetxt("policy.txt", policy)

            # np.savetxt("loss3.txt", loss3)
            # numpy.savetxt('new.csv', my_matrix, delimiter=',')

            #print(loss3)

            print(loss)
            print("selector_loss" + str(sel_loss))

            batch_no = batch_no + 1
            if global_step % config.period == 0:
                loss_sum = tf.Summary(value=[
                    tf.Summary.Value(tag="model/loss", simple_value=loss),
                ])
                writer.add_summary(loss_sum, global_step)
            if batch_time % config.checkpoint == 0:
                sess.run(
                    tf.assign(selector.is_train,
                              tf.constant(False, dtype=tf.bool)))
                sess.run(
                    tf.assign(spaner.is_train, tf.constant(False,
                                                           dtype=tf.bool)))
                # _, summ = evaluate_batch(
                #     selector, config.val_num_batches, train_eval_file, sess, "train", dp_train)

                metrics, summ = evaluate_span_batch(
                    config, selector, spaner, dev_total // config.batch_size,
                    dev_eval_file, sess, "dev", dp_dev)
                for s in summ:
                    writer.add_summary(s, global_step)
                print('epoch' + str(global_step))
                print('dev_loss:' + str(metrics["loss"]))
                print('F1:' + str(metrics["f1"]))
                print('em:' + str(metrics["exact_match"]))

                sess.run(
                    tf.assign(spaner.is_train, tf.constant(True,
                                                           dtype=tf.bool)))

                dev_loss = metrics["loss"]
                if dev_loss < loss_save:
                    loss_save = dev_loss
                    patience = 0
                else:
                    patience += 1
                if patience >= config.patience:
                    lr /= 2.0
                    loss_save = dev_loss
                    patience = 0
                sess.run(
                    tf.assign(spaner.lr, tf.constant(lr, dtype=tf.float32)))
                for s in summ:
                    writer.add_summary(s, global_step)
                writer.flush()
                filename = os.path.join(config.RL_save_dir,
                                        "model_{}.ckpt".format(global_step))
                saver.save(sess, filename)
Example #23
def construct(filename,
              class_type,
              model_type,
              model_dir,
              checkpoint='checkpoint',
              device='/cpu:0',
              target=None,
              classes=None):  # pylint: disable=unused-argument
    # sys.stdout = os.devnull
    # sys.stderr = os.devnull
    files = []
    with tf.Graph().as_default():
        with tf.Session() as sess:
            #tf.reset_default_graph()
            label_list = AGE_LIST if class_type == 'age' else GENDER_LIST
            nlabels = len(label_list)
            model_fn = select_model(model_type)

            with tf.device(device):
                # sys.stdout = sys.__stdout__
                # sys.stderr = sys.__stderr__
                images = tf.placeholder(tf.float32,
                                        [None, RESIZE_FINAL, RESIZE_FINAL, 3])
                logits = model_fn(nlabels, images, 1, False)
                init = tf.global_variables_initializer()
                requested_step = None

                checkpoint_path = '%s' % (model_dir)

                model_checkpoint_path, global_step = get_checkpoint(
                    checkpoint_path, requested_step, checkpoint)
                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)
                softmax_output = tf.nn.softmax(logits)

                coder = ImageCoder()

                # Support a batch mode if no face detection model
                if len(files) == 0:
                    files.append(filename)
                    # If it happens to be a list file, read the list and clobber the files
                    if one_of(filename, ('csv', 'tsv', 'txt')):
                        files = batchlist(filename)

                for it, f in enumerate(files):
                    image_file = resolve_file(f)

                    if image_file is None: continue

                    try:
                        best_choice = classify(sess, label_list,
                                               softmax_output, coder, images,
                                               image_file)
                        print("\nClass_type=", class_type)
                        if class_type == 'age':
                            classes[it].age = best_choice[0]
                            target.writerow(
                                (f, classes[it].gender, classes[it].age,
                                 '%.2f' % best_choice[1]))
                        if class_type == 'gender':
                            classes[it].name = f
                            classes[it].gender = best_choice[0]
                    except Exception as e:
                        print(e)
                        print('Failed to run image %s ' % image_file)
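A hedged usage sketch for construct(). The record container and file names below are illustrative assumptions; the only grounded contract is that classes[i] must expose .name, .age and .gender, and that the age pass writes the combined CSV rows through target:

import csv

class FaceRecord:
    # Hypothetical container; stands in for whatever `classes` holds.
    def __init__(self):
        self.name, self.age, self.gender = '', '', ''

with open('results.csv', 'w') as out:
    target = csv.writer(out)
    target.writerow(('file', 'gender', 'age', 'score'))
    records = [FaceRecord() for _ in range(100)]  # one record per input image
    # Run gender first, then age: the age pass emits the combined rows.
    construct('faces.txt', 'gender', 'inception', './gender_ckpt',
              classes=records, target=target)
    construct('faces.txt', 'age', 'inception', './age_ckpt',
              classes=records, target=target)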
Example #24
        from model import select_model, get_checkpoint
        from tensorflow.python.framework.graph_util import convert_variables_to_constants
        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            # tf.reset_default_graph()
            model_type = 'inception'
            if False:  # flip to True to export the gender model instead
                nlabels = 2
                model_dir = '21936'
                outname = 'gender_net'
            else:
                nlabels = 8
                model_dir = '22801'
                outname = 'age_net'

            model_fn = select_model('inception')

            with tf.device('/gpu:0'):
                images = tf.placeholder(tf.float32,
                                        [None, RESIZE_FINAL, RESIZE_FINAL, 3])

                logits = model_fn(nlabels, images, 1, False)
                init = tf.global_variables_initializer()
                requested_step = None
                checkpoint_path = '%s' % (os.path.join(src_dir, 'inception',
                                                       model_dir))
                model_checkpoint_path, global_step = get_checkpoint(
                    checkpoint_path, requested_step, 'checkpoint')
                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)
                softmax_output = tf.nn.softmax(logits, name='logits')
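                # The snippet imports convert_variables_to_constants but ends
                # before using it. A plausible continuation (an assumption,
                # not the original code) would freeze the restored weights and
                # save a deployable graph; 'logits' is the softmax op named
                # above, and the output file name is illustrative.
                frozen_graph_def = convert_variables_to_constants(
                    sess, sess.graph_def, ['logits'])
                with tf.gfile.GFile(outname + '.pb', 'wb') as f:
                    f.write(frozen_graph_def.SerializeToString())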
Example #25
def main(argv=None):

    if not os.path.exists(FLAGS.model_dir):
        os.mkdir(FLAGS.model_dir)
    folddirlist = FLAGS.train_dir.split(os.sep)
    subdir = FLAGS.model_dir + os.sep + folddirlist[-2]
    if not os.path.exists(subdir):
        os.mkdir(subdir)
    savemodeldir = subdir + os.sep + folddirlist[-1]
    if not os.path.exists(savemodeldir):
        os.mkdir(savemodeldir)

    if FLAGS.multitask:

        with tf.Graph().as_default():
            model_fn = select_model(FLAGS.model_type)
            # Open the metadata file and figure out nlabels, and size of epoch
            input_file_age = os.path.join(FLAGS.train_dir, 'mdage.json')
            input_file_gender = os.path.join(FLAGS.train_dir, 'mdgender.json')
            with open(input_file_age, 'r') as fage:
                mdage = json.load(fage)
            with open(input_file_gender, 'r') as fgender:
                mdgender = json.load(fgender)

            images, agelabels, genderlabels, _ = multiinputs(
                FLAGS.train_dir,
                batch_size=FLAGS.batch_size,
                image_size=FLAGS.image_size,
                train=True,
                num_preprocess_threads=FLAGS.num_preprocess_threads,
                datatype='train')
            agelogits, genderlogits = model_fn(mdage['nlabels'], images,
                                               mdgender['nlabels'], images,
                                               1 - FLAGS.pdrop, True)
            agelosses, genderlosses, totallosses = multiloss(
                agelogits, agelabels, genderlogits, genderlabels)
            agegendertrain_op = optimizer(FLAGS.optim, FLAGS.eta, totallosses,
                                          FLAGS.steps_per_decay,
                                          FLAGS.eta_decay_rate)

            saver = tf.train.Saver(tf.global_variables())
            summary_op = tf.summary.merge_all()
            sess = tf.Session(config=tf.ConfigProto(
                log_device_placement=FLAGS.log_device_placement))
            tf.global_variables_initializer().run(session=sess)

            # fine-tune dp_multitask and mobilenet_multitask
            if FLAGS.pre_checkpoint_path:
                print('Trying to restore checkpoint from %s ' %
                      FLAGS.pre_checkpoint_path)

                if FLAGS.model_type == 'LMTCNN':
                    all_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
                                                      scope="multitaskdpcnn")
                elif FLAGS.model_type == 'mobilenet_multitask':
                    all_variables = tf.get_collection(
                        tf.GraphKeys.VARIABLES, scope="MobileNetmultitask")

                age_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
                                                  scope="ageoutput")
                gender_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
                                                     scope="genderoutput")
                all_variables.extend(age_variables)
                all_variables.extend(gender_variables)
                restorer = tf.train.Saver(all_variables)
                restorer.restore(sess, FLAGS.pre_checkpoint_path)

                print('%s: Pre-trained model restored from %s' %
                      (datetime.now(), FLAGS.pre_checkpoint_path))

            run_dir = '%s/%s-run-%d' % (savemodeldir, FLAGS.model_type,
                                        os.getpid())
            checkpoint_path = '%s/%s' % (run_dir, FLAGS.checkpoint)
            if tf.gfile.Exists(run_dir) is False:
                print('Creating %s' % run_dir)
                tf.gfile.MakeDirs(run_dir)

            tf.train.write_graph(sess.graph_def,
                                 run_dir,
                                 'agegendermodel.pb',
                                 as_text=True)
            tf.train.start_queue_runners(sess=sess)
            summary_writer = tf.summary.FileWriter(run_dir, sess.graph)

            steps_per_train_epoch = int(mdage['train_counts'] /
                                         FLAGS.batch_size)
            num_steps = FLAGS.max_steps if FLAGS.epochs < 1 else FLAGS.epochs * steps_per_train_epoch
            print('Requested number of steps [%d]' % num_steps)

            for step in xrange(num_steps):
                start_time = time.time()
                _, totallossvalue, agelossvalue, genderlossvalue, imagesvalue, agelabelsvalue, genderlabelsvalue = sess.run(
                    [
                        agegendertrain_op, totallosses, agelosses,
                        genderlosses, images, agelabels, genderlabels
                    ])
                duration = time.time() - start_time

                assert not np.isnan(
                    agelossvalue), 'Model diverged with ageloss = NaN'
                assert not np.isnan(
                    genderlossvalue), 'Model diverged with genderloss = NaN'
                assert not np.isnan(
                    totallossvalue), 'Model diverged with totallossvalue= NaN'

                if step % 10 == 0:
                    num_examples_per_step = FLAGS.batch_size
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = float(duration)

                    format_str = (
                        '%s: step %d, ageloss= %.3f, genderloss= %.3f , totalloss= %.3f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, agelossvalue[0],
                                        genderlossvalue[0], totallossvalue,
                                        examples_per_sec, sec_per_batch))

                # loss evaluated every 100 steps
                if step % 100 == 0:
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, step)

                if step % 1000 == 0 or (step + 1) == num_steps:
                    saver.save(sess, checkpoint_path, global_step=step)

    else:

        with tf.Graph().as_default():
            model_fn = select_model(FLAGS.model_type)

            if FLAGS.class_type == 'Age':
                input_file_age = os.path.join(FLAGS.train_dir, 'mdage.json')
                with open(input_file_age, 'r') as fage:
                    mdage = json.load(fage)

                images, labels, _ = inputs_mod(
                    FLAGS.train_dir,
                    batch_size=FLAGS.batch_size,
                    image_size=FLAGS.image_size,
                    train=True,
                    num_preprocess_threads=FLAGS.num_preprocess_threads,
                    classtype=FLAGS.class_type,
                    datatype='train')
                logits = model_fn(mdage['nlabels'], images, 1 - FLAGS.pdrop,
                                  True)
                total_loss = loss(logits, labels)

            elif FLAGS.class_type == 'Gender':
                input_file_gender = os.path.join(FLAGS.train_dir,
                                                 'mdgender.json')
                with open(input_file_gender, 'r') as fgender:
                    mdgender = json.load(fgender)

                images, labels, _ = inputs_mod(
                    FLAGS.train_dir,
                    batch_size=FLAGS.batch_size,
                    image_size=FLAGS.image_size,
                    train=True,
                    num_preprocess_threads=FLAGS.num_preprocess_threads,
                    classtype=FLAGS.class_type,
                    datatype='train')
                logits = model_fn(mdgender['nlabels'], images, 1 - FLAGS.pdrop,
                                  True)
                total_loss = loss(logits, labels)

            train_op = optimizer(FLAGS.optim, FLAGS.eta, total_loss,
                                 FLAGS.steps_per_decay, FLAGS.eta_decay_rate)
            saver = tf.train.Saver(tf.global_variables())
            summary_op = tf.summary.merge_all()

            sess = tf.Session(config=tf.ConfigProto(
                log_device_placement=FLAGS.log_device_placement))
            tf.global_variables_initializer().run(session=sess)

            # it only works to fine-tune inception v3
            if FLAGS.pre_model:
                inception_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
                                                        scope="InceptionV3")
                restorer = tf.train.Saver(inception_variables)
                restorer.restore(sess, FLAGS.pre_model)

            if FLAGS.pre_checkpoint_path:
                if tf.gfile.Exists(FLAGS.pre_checkpoint_path) is True:
                    print('Trying to restore checkpoint from %s' %
                          FLAGS.pre_checkpoint_path)
                    restorer = tf.train.Saver()
                    restorer.restore(
                        sess,
                        tf.train.latest_checkpoint(FLAGS.pre_checkpoint_path))
                    print('%s: Pre-trained model restored from %s' %
                          (datetime.now(), FLAGS.pre_checkpoint_path))

            run_dir = '%s/%s-%s-run-%d' % (savemodeldir, FLAGS.model_type,
                                           FLAGS.class_type, os.getpid())
            checkpoint_path = '%s/%s' % (run_dir, FLAGS.checkpoint)
            if tf.gfile.Exists(run_dir) is False:
                print('Creating %s' % run_dir)
                tf.gfile.MakeDirs(run_dir)

            tf.train.write_graph(sess.graph_def,
                                 run_dir,
                                 'model.pb',
                                 as_text=True)
            tf.train.start_queue_runners(sess=sess)
            summary_writer = tf.summary.FileWriter(run_dir, sess.graph)

            if FLAGS.class_type == 'Age':
                steps_per_train_epoch = int(mdage['train_counts'] /
                                            FLAGS.batch_size)
            elif FLAGS.class_type == 'Gender':
                steps_per_train_epoch = int(mdgender['train_counts'] /
                                            FLAGS.batch_size)

            num_steps = FLAGS.max_steps if FLAGS.epochs < 1 else FLAGS.epochs * steps_per_train_epoch
            print('Requested number of steps [%d]' % num_steps)

            for step in xrange(num_steps):
                start_time = time.time()
                _, loss_value = sess.run([train_op, total_loss])
                duration = time.time() - start_time

                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                if step % 10 == 0:
                    num_examples_per_step = FLAGS.batch_size
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = float(duration)

                    format_str = (
                        '%s: step %d, loss = %.3f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str % (datetime.now(), step, loss_value,
                                        examples_per_sec, sec_per_batch))

                # loss evaluated every 100 steps
                if step % 100 == 0:
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, step)

                if step % 1000 == 0 or (step + 1) == num_steps:
                    saver.save(sess, checkpoint_path, global_step=step)
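A quick worked example of the step arithmetic above, with illustrative numbers only: 12800 training images at batch size 128 give 100 steps per epoch, so epochs = 30 requests 3000 steps, while epochs < 1 falls back to max_steps.

steps_per_train_epoch = 12800 // 128    # -> 100 (illustrative counts)
num_steps = 30 * steps_per_train_epoch  # epochs = 30 -> 3000 steps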
Example #26
def main(argv=None):
    with tf.Graph().as_default():

        global_step = tf.Variable(0, trainable=False)

        model_fn = select_model(FLAGS.model_type)
        # Open the metadata file and figure out nlabels, and size of epoch
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        images, labels, _ = distorted_inputs(FLAGS.train_dir, FLAGS.batch_size,
                                             FLAGS.image_size,
                                             FLAGS.num_preprocess_threads)
        if not FLAGS.dual:
            logits = model_fn(md['nlabels'], images, 1 - FLAGS.pdrop, True)
            total_loss, accuracy = loss(logits, labels, global_step)
        else:
            with tf.variable_scope("net1") as scope:
                logits1 = model_fn(md['nlabels'], images, 1 - FLAGS.pdrop,
                                   True)
            with tf.variable_scope("net2") as scope:
                logits2 = model_fn(md['nlabels'], images, 1 - FLAGS.pdrop,
                                   True)

            pred1 = tf.argmax(logits1, 1)
            pred2 = tf.argmax(logits2, 1)

            update_step = tf.stop_gradient(
                tf.to_float(
                    tf.logical_or(tf.not_equal(pred1, pred2),
                                  global_step < FLAGS.init_iter)))

            with tf.variable_scope("net1") as scope:
                if FLAGS.min_batch_size == -1:
                    total_loss1, accuracy1 = loss(logits1, labels, global_step,
                                                  None, scope.name)
                else:
                    total_loss1, accuracy1 = loss(logits1, labels, global_step,
                                                  update_step, scope.name)
            with tf.variable_scope("net2") as scope:
                if FLAGS.min_batch_size == -1:
                    total_loss2, accuracy2 = loss(logits2, labels, global_step,
                                                  None, scope.name)
                else:
                    total_loss2, accuracy2 = loss(logits2, labels, global_step,
                                                  update_step, scope.name)

            disagree_rate = tf.reduce_mean(
                tf.to_float(tf.not_equal(pred1, pred2)))

        if not FLAGS.dual:
            train_op = optimizer(FLAGS.optim, FLAGS.eta, total_loss)
        else:
            with tf.variable_scope("net1") as scope:
                var_net1 = [
                    var for var in tf.all_variables()
                    if var.name.startswith("net1")
                ]
                train_op1 = optimizer(FLAGS.optim,
                                      FLAGS.eta,
                                      total_loss1,
                                      variables=var_net1,
                                      name=scope.name)
            with tf.variable_scope("net2") as scope:
                var_net2 = [
                    var for var in tf.all_variables()
                    if var.name.startswith("net2")
                ]
                train_op2 = optimizer(FLAGS.optim,
                                      FLAGS.eta,
                                      total_loss2,
                                      variables=var_net2,
                                      name=scope.name)
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=151)
        summary_op = tf.merge_all_summaries()
        init = tf.initialize_all_variables()
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))

        sess.run(init)

        # This is total hackland, it only works to fine-tune iv3
        if FLAGS.pre_model:
            inception_variables = tf.get_collection(tf.GraphKeys.VARIABLES,
                                                    scope="InceptionV3")
            restorer = tf.train.Saver(inception_variables)
            restorer.restore(sess, FLAGS.pre_model)

        if FLAGS.pre_checkpoint_path:
            if tf.gfile.Exists(FLAGS.pre_checkpoint_path) is True:
                print('Trying to restore checkpoint from %s' %
                      FLAGS.pre_checkpoint_path)
                restorer = tf.train.Saver()
                restorer.restore(
                    sess, tf.train.latest_checkpoint(FLAGS.pre_checkpoint_path))
                print('%s: Pre-trained model restored from %s' %
                      (datetime.now(), FLAGS.pre_checkpoint_path))

        run_dir = '%s/run-%d' % (FLAGS.train_dir, os.getpid())

        checkpoint_path = '%s/%s' % (run_dir, FLAGS.checkpoint)
        if tf.gfile.Exists(run_dir) is False:
            print('Creating %s' % run_dir)
            tf.gfile.MakeDirs(run_dir)

        tf.train.write_graph(sess.graph_def, run_dir, 'model.pb', as_text=True)

        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.train.SummaryWriter(run_dir, sess.graph)
        steps_per_train_epoch = int(md['train_counts'] / FLAGS.batch_size)
        num_steps = FLAGS.max_steps if FLAGS.epochs < 1 else FLAGS.epochs * steps_per_train_epoch
        print('Requested number of steps [%d]' % num_steps)

        trainable_buffer_img = None
        trainable_buffer_lbl = None
        for step in range(num_steps):
            start_time = time.time()
            if FLAGS.Qloss:
                _, loss_value, acc_value, q_val = sess.run(
                    [train_op, total_loss, accuracy, Q_GLOBAL],
                    feed_dict={global_step: step})
                print(q_val)
            elif not FLAGS.dual:
                _, loss_value, acc_value = sess.run(
                    [train_op, total_loss, accuracy],
                    feed_dict={global_step: step})
            elif FLAGS.dual and (step < FLAGS.init_iter
                                 or FLAGS.min_batch_size != -1):
                _, _, loss_value, acc_value1, acc_value2, drate = sess.run(
                    [
                        train_op1, train_op2, total_loss1, accuracy1,
                        accuracy2, disagree_rate
                    ],
                    feed_dict={global_step: step})
            else:
                #loss_value, acc_value1, acc_value2, drate = (0,0,0,0)
                img, lbl, us, loss_value, acc_value1, acc_value2, drate = sess.run(
                    [
                        images, labels, update_step, total_loss1, accuracy1,
                        accuracy2, disagree_rate
                    ],
                    feed_dict={global_step: step})
                rel_img = img[us == 1]
                rel_lbl = lbl[us == 1]
                if trainable_buffer_img is None:
                    trainable_buffer_img = rel_img
                    trainable_buffer_lbl = rel_lbl
                else:
                    print(np.shape(trainable_buffer_lbl), np.shape(rel_lbl))
                    trainable_buffer_img = np.vstack(
                        (trainable_buffer_img, rel_img))
                    trainable_buffer_lbl = np.hstack(
                        (trainable_buffer_lbl, rel_lbl))

                if trainable_buffer_img.shape[0] >= FLAGS.batch_size:
                    batch_img = trainable_buffer_img[:FLAGS.batch_size]
                    batch_lbl = trainable_buffer_lbl[:FLAGS.batch_size]
                    _, _, loss_value, acc_value1, acc_value2, drate = sess.run(
                        [
                            train_op1, train_op2, total_loss1, accuracy1,
                            accuracy2, disagree_rate
                        ],
                        feed_dict={
                            global_step: step,
                            images: batch_img,
                            labels: batch_lbl
                        })
                    trainable_buffer_img = trainable_buffer_img[FLAGS.
                                                                batch_size:]
                    trainable_buffer_lbl = trainable_buffer_lbl[FLAGS.
                                                                batch_size:]
                #_, loss_value, acc_value2, drate = sess.run([train_op2, total_loss2, accuracy2, disagree_rate], feed_dict={global_step: step})
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 1 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                if not FLAGS.dual:
                    format_str = (
                        '%s: step %d, loss = %.3f, acc = %.3f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str %
                          (datetime.now(), step, loss_value, acc_value,
                           examples_per_sec, sec_per_batch))
                else:
                    format_str = (
                        '%s: step %d, loss = %.3f, acc1 = %.3f, acc2 = %.3f, disagree_rate = %.3f (%.1f examples/sec; %.3f '
                        'sec/batch)')
                    print(format_str %
                          (datetime.now(), step, loss_value, acc_value1,
                           acc_value2, drate, examples_per_sec, sec_per_batch))

            # Summaries (including the loss summary) are only written every 100 steps
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == num_steps:
                saver.save(sess, checkpoint_path, global_step=step)
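The dual branch above is a disagreement-driven curriculum: past the warm-up iterations, only examples the two networks disagree on are kept, and a joint update fires once a full batch of them has accumulated. A minimal NumPy sketch of that buffering logic (names, shapes and the batch size are illustrative):

import numpy as np

BATCH_SIZE = 4
buf_img, buf_lbl = None, None

def buffer_disagreements(img, lbl, mask):
    # Keep only examples where the two networks disagreed (mask == 1);
    # return a training batch once enough of them have piled up.
    global buf_img, buf_lbl
    rel_img, rel_lbl = img[mask == 1], lbl[mask == 1]
    if buf_img is None:
        buf_img, buf_lbl = rel_img, rel_lbl
    else:
        buf_img = np.vstack((buf_img, rel_img))
        buf_lbl = np.hstack((buf_lbl, rel_lbl))
    if buf_img.shape[0] >= BATCH_SIZE:
        batch = (buf_img[:BATCH_SIZE], buf_lbl[:BATCH_SIZE])
        buf_img, buf_lbl = buf_img[BATCH_SIZE:], buf_lbl[BATCH_SIZE:]
        return batch  # a full batch of disagreements, ready to train on
    return None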
Example #27
def main(argv=None):
    with tf.Graph().as_default():
        serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']

        images = tf.map_fn(preproc_jpeg, jpegs, dtype=tf.float32)
        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=config) as sess:
            model_fn = select_model(FLAGS.model_type)
            logits = model_fn(nlabels, images, 1, False)
            softmax_output = tf.nn.softmax(logits)
            values, indices = tf.nn.top_k(
                softmax_output, 2 if FLAGS.class_type == 'age' else 1)
            class_tensor = tf.constant(label_list)
            table = tf.contrib.lookup.index_to_string_table_from_tensor(
                class_tensor)
            classes = table.lookup(tf.to_int64(indices))
            requested_step = FLAGS.requested_step if FLAGS.requested_step else None
            checkpoint_path = '%s' % (FLAGS.model_dir)
            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)

            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)
            print('Restored model checkpoint %s' % model_checkpoint_path)

            output_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(str(FLAGS.model_version)))
            print('Exporting trained model to %s' % output_path)
            builder = tf.saved_model.builder.SavedModelBuilder(output_path)

            # Build the signature_def_map.
            classify_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                serialized_tf_example)
            classes_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                classes)
            scores_output_tensor_info = tf.saved_model.utils.build_tensor_info(
                values)
            classification_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={
                        tf.saved_model.signature_constants.CLASSIFY_INPUTS:
                        classify_inputs_tensor_info
                    },
                    outputs={
                        tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES:
                        classes_output_tensor_info,
                        tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES:
                        scores_output_tensor_info
                    },
                    method_name=tf.saved_model.signature_constants.
                    CLASSIFY_METHOD_NAME))

            predict_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(
                jpegs)
            prediction_signature = (
                tf.saved_model.signature_def_utils.build_signature_def(
                    inputs={'images': predict_inputs_tensor_info},
                    outputs={
                        'classes': classes_output_tensor_info,
                        'scores': scores_output_tensor_info
                    },
                    method_name=tf.saved_model.signature_constants.
                    PREDICT_METHOD_NAME))

            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    'predict_images':
                    prediction_signature,
                    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                    classification_signature,
                },
                legacy_init_op=legacy_init_op)

            builder.save()
            print('Successfully exported model to %s' % FLAGS.output_dir)
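A hedged sketch of consuming the export above from plain TF1 (the export directory and image path are illustrative); the 'predict_images' signature built in the snippet takes raw JPEG bytes:

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    meta = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], './export/1')
    sig = meta.signature_def['predict_images']
    jpeg = open('face.jpg', 'rb').read()
    classes, scores = sess.run(
        [sig.outputs['classes'].name, sig.outputs['scores'].name],
        feed_dict={sig.inputs['images'].name: [jpeg]})
    print(classes, scores)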
Example #28
def main(argv=None):  # pylint: disable=unused-argument


    with tf.Session() as sess:

        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        logits = model_fn(nlabels, images, 1, False)
        init = tf.global_variables_initializer()
            
        requested_step = FLAGS.requested_step if FLAGS.requested_step else None
        
        checkpoint_path = '%s' % (FLAGS.model_dir)

        model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            
        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)
                        
        softmax_output = tf.nn.softmax(logits)

        coder = ImageCoder()

        files = []

        if FLAGS.face_detection_model:
            print('Using face detector %s' % FLAGS.face_detection_model)
            face_detect = FaceDetector(FLAGS.face_detection_model)
            face_files, rectangles = face_detect.run(FLAGS.filename)
            files += face_files

        # Support a batch mode if no face detection model
        if len(files) == 0:
            files.append(FLAGS.filename)
            # If it happens to be a list file, read the list and clobber the files
            if one_of(FLAGS.filename, ('csv', 'tsv', 'txt')):
                files = batchlist(FLAGS.filename)

        writer = None
        output = None
        if FLAGS.target:
            print('Creating output file %s' % FLAGS.target)
            output = open(FLAGS.target, 'w')
            writer = csv.writer(output)
            writer.writerow(('file', 'label', 'score'))


        for f in files:
            image_file = resolve_file(f)
            
            if image_file is None: continue

            try:
                best_choice = classify(sess, label_list, softmax_output, coder, images, image_file)
                if writer is not None:
                    writer.writerow((f, best_choice[0], '%.2f' % best_choice[1]))
            except Exception as e:
                print(e)
                print('Failed to run image %s ' % image_file)

        if output is not None:
            output.close()
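When no face detection model is given, the script also accepts a list file: if FLAGS.filename ends in csv, tsv or txt (one_of presumably checks the extension), batchlist() reads one image path per line and every entry is classified, e.g. a faces.txt like (paths illustrative):

/data/portraits/img_0001.jpg
/data/portraits/img_0002.jpg
/data/portraits/img_0003.jpg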
Example #29
    def predict(self,
                image_file=None,
                mode=0,
                image_bound=None,
                use_tf_to_read=True):
        model_dir = self.model_list[mode]
        class_type = self.class_list[mode]
        files = []

        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=config) as sess:

            label_list = self.AGE_LIST if class_type == 'age' else self.GENDER_LIST
            nlabels = len(label_list)

            model_fn = select_model(self.model_type)

            with tf.device(self.device_id):

                images = tf.placeholder(
                    tf.float32,
                    [None, self.RESIZE_FINAL, self.RESIZE_FINAL, 3])
                logits = model_fn(nlabels, images, 1, False)
                # init = tf.global_variables_initializer()

                requested_step = self.requested_step if self.requested_step else None

                checkpoint_path = '%s' % (model_dir)

                model_checkpoint_path, global_step = get_checkpoint(
                    checkpoint_path, requested_step, self.checkpoint)

                saver = tf.train.Saver()
                saver.restore(sess, model_checkpoint_path)

                softmax_output = tf.nn.softmax(logits)

                coder = ImageCoder()


                writer = None
                best_choice, second_choice = self.classify_one_multi_crop(
                    sess,
                    label_list,
                    softmax_output,
                    coder,
                    images,
                    image_file,
                    writer,
                    image_bound=image_bound,
                    use_tf_to_read=use_tf_to_read)

        return best_choice, second_choice
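A hedged usage sketch; Guesser is a hypothetical name for the class that owns predict(), assumed to be constructed with the model_list/class_list the method indexes into, and each returned choice is assumed to be a (label, score) pair:

guesser = Guesser()   # hypothetical constructor
best, second = guesser.predict(image_file='face.jpg', mode=0)
print('best: %s (%.2f), runner-up: %s (%.2f)' % (best + second))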
Example #30
# Open a Session
#config = tf.ConfigProto(allow_soft_placement=True)
#sess = tf.Session()

# Creating different graphs
g1 = tf.Graph()
g2 = tf.Graph()

# AGE MODEL
with g1.as_default():
    session = tf.Session(graph=g1)
    with session.as_default():
        images = tf.placeholder(tf.float32,
                                [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        ageNet = select_model('default')
        logits = ageNet(len(AGE_LIST), images, 1, False)
        requested_step = 14999
        checkpoint_path = '%s' % ('C:/Users/LEE/Desktop/rude-carnie/age_model')
        model_checkpoint_path, global_step = get_checkpoint(
            checkpoint_path, requested_step, 'checkpoint')
        age_saver = tf.train.Saver()
        age_saver.restore(session, model_checkpoint_path)

# GENDER MODEL
with g2.as_default():
    session2 = tf.Session(graph=g2)
    with session2.as_default():
        images2 = tf.placeholder(tf.float32,
                                 [None, RESIZE_FINAL, RESIZE_FINAL, 3])
        genderNet = select_model('default')
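The snippet breaks off here; by symmetry with the age block above, the gender branch presumably continues along these lines (a hedged reconstruction with an illustrative checkpoint path, not the original code):

        logits2 = genderNet(len(GENDER_LIST), images2, 1, False)
        checkpoint_path2 = 'C:/Users/LEE/Desktop/rude-carnie/gender_model'
        model_checkpoint_path2, _ = get_checkpoint(
            checkpoint_path2, None, 'checkpoint')
        gender_saver = tf.train.Saver()
        gender_saver.restore(session2, model_checkpoint_path2)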
Example #31
def main(argv=None):
    with tf.Graph().as_default():

        model_fn = select_model(FLAGS.model_type)
        # Open the metadata file and figure out nlabels, and size of epoch
        input_file = os.path.join(FLAGS.train_dir, 'md.json')
        print(input_file)
        with open(input_file, 'r') as f:
            md = json.load(f)

        images, labels, _ = distorted_inputs(FLAGS.train_dir, FLAGS.batch_size, FLAGS.image_size, FLAGS.num_preprocess_threads)
        logits = model_fn(md['nlabels'], images, 1-FLAGS.pdrop, True)
        total_loss = loss(logits, labels)

        train_op = optimizer(FLAGS.optim, FLAGS.eta, total_loss)
        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))

        tf.global_variables_initializer().run(session=sess)

        # This is total hackland, it only works to fine-tune iv3
        if FLAGS.pre_model:
            inception_variables = tf.get_collection(
                tf.GraphKeys.VARIABLES, scope="InceptionV3")
            restorer = tf.train.Saver(inception_variables)
            restorer.restore(sess, FLAGS.pre_model)

        if FLAGS.pre_checkpoint_path:
            if tf.gfile.Exists(FLAGS.pre_checkpoint_path) is True:
                print('Trying to restore checkpoint from %s' % FLAGS.pre_checkpoint_path)
                restorer = tf.train.Saver()
                restorer.restore(
                    sess, tf.train.latest_checkpoint(FLAGS.pre_checkpoint_path))
                print('%s: Pre-trained model restored from %s' %
                      (datetime.now(), FLAGS.pre_checkpoint_path))


        run_dir = '%s/run-%d' % (FLAGS.train_dir, os.getpid())

        checkpoint_path = '%s/%s' % (run_dir, FLAGS.checkpoint)
        if tf.gfile.Exists(run_dir) is False:
            print('Creating %s' % run_dir)
            tf.gfile.MakeDirs(run_dir)

        tf.train.write_graph(sess.graph_def, run_dir, 'model.pb', as_text=True)

        tf.train.start_queue_runners(sess=sess)


        summary_writer = tf.summary.FileWriter(run_dir, sess.graph)
        steps_per_train_epoch = int(md['train_counts'] / FLAGS.batch_size)
        num_steps = FLAGS.max_steps if FLAGS.epochs < 1 else FLAGS.epochs * steps_per_train_epoch
        print('Requested number of steps [%d]' % num_steps)

        
        for step in xrange(num_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, total_loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                
                format_str = ('%s: step %d, loss = %.3f (%.1f examples/sec; %.3f ' 'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            # Summaries (including the loss summary) are only written every 100 steps
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
                
            if step % 1000 == 0 or (step + 1) == num_steps:
                saver.save(sess, checkpoint_path, global_step=step)
Example #32
    nlabels = len(label_list)
    if nlabels > 2:
        output[best] = 0
        second_best = np.argmax(output)

        print('Guess @ 2 %s, prob = %.2f' % (label_list[second_best], output[second_best]))
    return best_choice


# Must initialize the graph below before calling guessGender
# tf.reset_default_graph()
label_list = GENDER_LIST
nlabels = len(label_list)

print('Executing on %s' % FLAGS.device_id)
model_fn = select_model(FLAGS.model_type)
images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
logits = model_fn(nlabels, images, 1, False)



def guessGender(file_name):  # pylint: disable=unused-argument
    # Detect the gender of a single photo
    with tf.Session() as sess:
        with tf.device(FLAGS.device_id):
            init = tf.global_variables_initializer()

            requested_step = FLAGS.requested_step if FLAGS.requested_step else None

            checkpoint_path = '%s' % (GENDER_MODEL_PATH)
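The example is truncated here; the remainder of guessGender presumably follows the restore-and-classify pattern used throughout these examples (a hedged reconstruction, not the original code):

            model_checkpoint_path, global_step = get_checkpoint(
                checkpoint_path, requested_step, FLAGS.checkpoint)
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)
            softmax_output = tf.nn.softmax(logits)
            coder = ImageCoder()
            return classify(sess, label_list, softmax_output, coder, images,
                            file_name)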
Example #33
def main(argv=None):  # pylint: disable=unused-argument

    files = []
    
    if FLAGS.face_detection_model:
        print('Using face detector (%s) %s' % (FLAGS.face_detection_type, FLAGS.face_detection_model))
        face_detect = face_detection_model(FLAGS.face_detection_type, FLAGS.face_detection_model)
        face_files, rectangles = face_detect.run(FLAGS.filename)
        print(face_files)
        files += face_files


    with tf.Session() as sess:

        #tf.reset_default_graph()
        label_list = AGE_LIST if FLAGS.class_type == 'age' else GENDER_LIST
        nlabels = len(label_list)

        print('Executing on %s' % FLAGS.device_id)
        model_fn = select_model(FLAGS.model_type)

        with tf.device(FLAGS.device_id):
            
            images = tf.placeholder(tf.float32, [None, RESIZE_FINAL, RESIZE_FINAL, 3])
            logits = model_fn(nlabels, images, 1, False)
            init = tf.global_variables_initializer()
            
            requested_step = FLAGS.requested_step if FLAGS.requested_step else None
        
            checkpoint_path = '%s' % (FLAGS.model_dir)

            model_checkpoint_path, global_step = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
            
            saver = tf.train.Saver()
            saver.restore(sess, model_checkpoint_path)
                        
            softmax_output = tf.nn.softmax(logits)

            coder = ImageCoder()

            # Support a batch mode if no face detection model
            if len(files) == 0:
                files.append(FLAGS.filename)
                # If it happens to be a list file, read the list and clobber the files
                if one_of(FLAGS.filename, ('csv', 'tsv', 'txt')):
                    files = batchlist(FLAGS.filename)

            writer = None
            output = None
            if FLAGS.target:
                print('Creating output file %s' % FLAGS.target)
                output = open(FLAGS.target, 'w')
                writer = csv.writer(output)
                writer.writerow(('file', 'label', 'score'))


            for f in files:
                image_file = resolve_file(f)
            
                if image_file is None: continue

                try:
                    best_choice = classify(sess, label_list, softmax_output, coder, images, image_file)
                    if writer is not None:
                        writer.writerow((f, best_choice[0], '%.2f' % best_choice[1]))
                except Exception as e:
                    print(e)
                    print('Failed to run image %s ' % image_file)

            if output is not None:
                output.close()
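As a closing note, a hedged invocation of this last example (the script name guess.py and all paths are assumptions; the flag names match the ones the code reads): running python guess.py --model_type inception --model_dir ./checkpoints/age --class_type age --filename portrait.jpg --target results.csv classifies one image and writes file/label/score rows to results.csv, prepending detected face crops when --face_detection_model is also given.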