# Example 1
def generate_hashcode():
    """Compute hash codes for every training image and persist them to MySQL.

    For each image path returned by ``get_data.get_zacao_input`` the restored
    model produces a short hash (``hashcode``) and a long float hash
    (``h_fc1_drop``).  Each distinct short hash is written once to the
    'suoyin' index table; every image gets a row in the 'hash' table keyed by
    filename and label.

    Relies on module-level configuration: ``train_dir``, ``label_dir``,
    ``CAPACITY``, ``IMG_W``, ``IMG_H``, ``N_CLASSES``, ``logs_train_dir``.
    Returns nothing; side effects are database writes and console output.
    """
    short_hash_list = []
    train, train_label = get_data.get_zacao_input(train_dir, label_dir)
    train_batch, train_label_batch = get_data.get_batch2(
        train, train_label, 1, CAPACITY)

    # Decode and resize a single JPEG per step (batch size is fixed at 1).
    image_contents = tf.read_file(train_batch[0])
    image_jpg = tf.image.decode_jpeg(image_contents, channels=3)
    image_jpg = tf.image.resize_images(image_jpg, [IMG_W, IMG_H],
                                       method=tf.image.ResizeMethod.AREA)
    image_jpg = tf.reshape(tensor=image_jpg, shape=[1, IMG_W, IMG_H, 3])

    # Logits are not needed here; only the two hash outputs are consumed.
    _, h_fc1_drop, hashcode = model.inference_without_dropout(
        image_jpg, 1, N_CLASSES)
    sess = tf.Session()

    saver = tf.train.Saver()
    print("Reading checkpoints...")
    ckpt = tf.train.get_checkpoint_state(logs_train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        # The global step is encoded at the end of the checkpoint filename.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Loading success, global_step is %s' % global_step)
    else:
        # Without a restored checkpoint every variable is uninitialized and
        # the first sess.run below would raise FailedPreconditionError, so
        # bail out early instead of crashing mid-loop.
        print('No checkpoint file found')
        sess.close()
        return

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        step = 0
        # One step per listed image.
        while step < len(train):
            if coord.should_stop():
                break
            h_fc1_drop2, hashcode2, train_batch2, train_label_batch2 = sess.run(
                [h_fc1_drop, hashcode, train_batch, train_label_batch])
            print(h_fc1_drop2.shape)
            print(h_fc1_drop2)
            print(hashcode2.shape)
            print(hashcode2)
            print(train_label_batch2)
            print(train_batch2)
            img_name = train_batch2[0].decode()
            short_hash_str = ','.join(str(i) for i in hashcode2[0])
            # Round to 8 decimals so the stored string is stable across runs.
            long_hash_str = ','.join(
                str(round(float(i), 8)) for i in h_fc1_drop2[0])
            train_label_batch_str = str(train_label_batch2[0])
            # Index table keeps each distinct short hash exactly once.
            if short_hash_str not in short_hash_list:
                short_hash_list.append(short_hash_str)
                save_to_mysql2('suoyin', short_hash_str)
            save_to_mysql('hash', long_hash_str, short_hash_str, img_name,
                          train_label_batch_str)
            print('>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<')
            step += 1
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
# Example 2
def run_training():
    """Train the model for up to ``MAX_STEP`` steps, resuming from the latest
    checkpoint in ``logs_train_dir`` when one exists.

    Logs loss/accuracy every 50 steps, writes TensorBoard summaries, and
    saves a checkpoint every 1000 steps and at the final step.

    Relies on module-level configuration: ``train_dir``, ``label_dir``,
    ``IMG_W``, ``IMG_H``, ``BATCH_SIZE``, ``CAPACITY``, ``N_CLASSES``,
    ``learning_rate``, ``MAX_STEP``, ``logs_train_dir``.
    """
    train, train_label = get_data.get_zacao_input(train_dir, label_dir)
    train_batch, train_label_batch = get_data.get_batch_jpg(
        train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
    train_logits = pre_model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = pre_model.losses(train_logits, train_label_batch)
    train_op = pre_model.trainning(train_loss, learning_rate)
    train_acc = pre_model.evaluation(train_logits, train_label_batch)
    summary_op = tf.summary.merge_all()
    sess = tf.Session()

    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    # Initialize first; a checkpoint restore below overwrites these values.
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    step = 0
    ckpt = tf.train.get_checkpoint_state(logs_train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        # Resume: the step count is encoded in the checkpoint filename,
        # e.g. ".../model-1234" -> 1234.
        step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Loading success, global_step is %d' % step)
        step += 1

    try:
        while step < MAX_STEP:
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

            if step % 50 == 0 or step + 1 == MAX_STEP:
                print('Step %d, train loss = %.2f, train accuracy = %.2f%%'
                      % (step, tra_loss, tra_acc * 100.0))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            if step % 1000 == 0 or step + 1 == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model')
                saver.save(sess, checkpoint_path, global_step=step)
            step += 1
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    # Close the writer so buffered summary events are flushed to disk.
    train_writer.close()
    sess.close()
# Example 3
def start_test():
    """Evaluate the latest checkpoint on the test set and print accuracy.

    Runs ``ceil(1000 / BATCH_SIZE)`` evaluation batches inside a fresh graph
    and reports the mean per-batch accuracy as a percentage.

    Relies on module-level configuration: ``test_dir``, ``test_label_dir``,
    ``IMG_W``, ``IMG_H``, ``BATCH_SIZE``, ``CAPACITY``, ``N_CLASSES``,
    ``logs_train_dir``.
    """
    with tf.Graph().as_default():
        test, test_label = get_data.get_zacao_input(test_dir, test_label_dir)
        test_batch, test_label_batch = get_data.get_batch_jpg(
            test, test_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
        test_logits, _, _ = model.inference_without_dropout(
            test_batch, BATCH_SIZE, N_CLASSES)
        test_acc = model.evaluation(test_logits, test_label_batch)

        # Evaluate roughly num_test images, rounded up to whole batches.
        num_test = 1000
        num_iter = int(math.ceil(num_test / BATCH_SIZE))
        step = 0

        saver = tf.train.Saver()
        with tf.Session() as sess:
            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                # The global step is encoded in the checkpoint filename.
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
            coord2 = tf.train.Coordinator()
            threads2 = tf.train.start_queue_runners(sess=sess, coord=coord2)
            try:
                count_acc = 0
                while step < num_iter:
                    if coord2.should_stop():
                        break
                    # test_batch is fetched in the same run call so the input
                    # queue stays in sync with the accuracy op.
                    _, t = sess.run([test_batch, test_acc])
                    if step == 0:
                        print('Start test...')
                    count_acc += t
                    step += 1
                # Guard against division by zero when the coordinator stops
                # before the first batch completes.
                if step > 0:
                    print('Test acc = %.2f%%' % (count_acc / step * 100))
            except tf.errors.OutOfRangeError:
                print('Done testing -- epoch limit reached')
            finally:
                coord2.request_stop()
            coord2.join(threads2)