Example #1
def run_training():
    # Directories for the training images and the TensorBoard logs
    train_dir = "./image/"
    logs_train_dir = "./log"

    train, train_label = image_P.get_files(train_dir)
    train_batch, train_label_batch = image_P.get_batch(train,
                                                       train_label,
                                                       IMG_W,
                                                       IMG_H,
                                                       BATCH_SIZE,
                                                       CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    # Saver for writing checkpoints
    saver = tf.train.Saver()

    # Initialize all variables
    sess.run(tf.global_variables_initializer())

    # Start the input-pipeline threads
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break  # a thread requested a stop
            _, temp_loss, temp_acc = sess.run([train_op, train_loss, train_acc])

            # Print progress every 50 steps
            if step % 50 == 0:
                print('Step %d, train loss = %.2f, train accuracy = %.2f' %
                      (step, temp_loss, temp_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)

            # Save a checkpoint every 200 steps and at the final step
            if step % 200 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Training stopped: the input queue is exhausted.')
    finally:
        coord.request_stop()

    # Wait for the threads to finish
    coord.join(threads)

    sess.close()
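
This snippet depends on module-level constants and on the project's `image_P` and `model` helpers, none of which are shown here. A minimal, hypothetical preamble of the kind it assumes, placed before the function definition (all values below are illustrative, not taken from the source project):

import os
import numpy as np
import tensorflow as tf

import image_P   # project helper: get_files() / get_batch()
import model     # project helper: inference() / losses() / trainning() / evaluation()

# Hypothetical hyperparameters; the real values are not shown in the snippet.
N_CLASSES = 2
IMG_W = 400
IMG_H = 300
BATCH_SIZE = 16
CAPACITY = 256
MAX_STEP = 10000
learning_rate = 0.0001

if __name__ == '__main__':
    run_training()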
Example #2
# Configure the loss function and optimizer
sgd = SGD(lr=learning_rate, decay=1e-5)
model_vgg_train.compile(loss='categorical_crossentropy',
                        optimizer=sgd, metrics=['accuracy'])


# Data generator: yields one (image, one-hot label) pair per call
def get_train_batch(X_train, Y_train, img_w, img_h):
    i = 0
    while True:
        x = []
        y = []
        image = Image.open(X_train[i])
        image = image.resize([img_w, img_h])
        x.append(np.array(image))
        y.append(to_categorical(Y_train[i], N_CLASSES))
        i = (i + 1) % len(X_train)
        x = np.array(x)
        y = np.array(y)
        yield (x, y)

train,train_label=image_P.get_files(train_dir)

model_vgg_train.fit_generator(get_train_batch(train, train_label,
                                              IMG_W, IMG_H),
                              steps_per_epoch=BATCH_SIZE,
                              epochs=MAX_STEP)

model_vgg_train.save(log_dir)
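
Note that `get_train_batch` yields a single image per step, so every gradient update sees a batch of one, and `steps_per_epoch=BATCH_SIZE` makes an "epoch" only BATCH_SIZE single-image steps. A sketch of a variant that yields true batches (the name `get_train_batch_batched` is made up here; `N_CLASSES` is the same constant the snippet uses):

import numpy as np
from PIL import Image
from keras.utils import to_categorical

def get_train_batch_batched(X_train, Y_train, img_w, img_h, batch_size):
    """Yield (images, labels) arrays of batch_size items, cycling over the data."""
    i = 0
    while True:
        x, y = [], []
        for _ in range(batch_size):
            image = Image.open(X_train[i]).resize([img_w, img_h])
            x.append(np.array(image))
            y.append(to_categorical(Y_train[i], N_CLASSES))
            i = (i + 1) % len(X_train)
        yield np.array(x), np.array(y)

With a batch generator like this, `steps_per_epoch` would normally be `len(train) // BATCH_SIZE` rather than `BATCH_SIZE`.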
Example #3
def run_training():
    train_dir = "./image/"
    logs_train_dir = "./log_" + str(MAX_STEP) + "_cap_" + str(CAPACITY)

    train, train_label = image_P.get_files(train_dir)
    train_batch, train_label_batch = image_P.get_batch(train, train_label,
                                                       IMG_W, IMG_H,
                                                       BATCH_SIZE, CAPACITY)
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    train_loss = model.losses(train_logits, train_label_batch)
    train_op = model.trainning(train_loss, learning_rate)
    train_acc = model.evaluation(train_logits, train_label_batch)

    summary_op = tf.summary.merge_all()

    config = tf.ConfigProto(allow_soft_placement=True,
                            gpu_options=tf.GPUOptions(
                                per_process_gpu_memory_fraction=0.7,
                                allow_growth=True))

    sess = tf.Session(config=config)
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    tf.train.start_queue_runners(sess=sess)

    for step in range(MAX_STEP + 1):
        sess.run([train_op, train_loss, train_acc])
        print(step)
    print("Finished")

    log_dir = train_dir

    images_dir = './image/'

    images_cat = open("imagelist.txt")
    # Store every image's logits so they can be compared later
    images_tested = []

    num_photos = 0
    outfile = open("test-new-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt",
                   "w")

    # Build the evaluation path once: a placeholder fed one image at a time,
    # re-using the trained weights. (The original created a fresh, unconnected
    # placeholder on every iteration and ran train_logits, which ignores the
    # feed; this fix assumes model.inference creates its variables via
    # tf.get_variable so that variable-scope reuse works.)
    xName = tf.placeholder(tf.float32, shape=[1, 300, 400, 3])
    with tf.variable_scope(tf.get_variable_scope(), reuse=True):
        eval_logits = model.inference(xName, 1, N_CLASSES)

    filelines = images_cat.readlines()
    for line in filelines:
        image_name = line.strip('\n')
        image_array = get_one_image(images_dir + image_name)
        image_array = np.reshape(image_array, [1, 300, 400, 3])

        prediction = sess.run(eval_logits, feed_dict={xName: image_array})
        prediction = np.array(prediction, dtype='float32')
        images_tested.append([image_name, prediction])

        num_photos += 1
        print("Test:" + str(num_photos))

        outfile.writelines(image_name)
        outfile.writelines(str(prediction))
        outfile.flush()

    outfile.close()
    # Write the nearest-neighbour lists as a Python dict literal
    outfile2 = open(
        "nearesttest-" + str(MAX_STEP) + "-" + str(CAPACITY) + ".txt", "w")
    outfile2.write("result = {\n")
    num_photos = 0
    for line in filelines:
        num_photos += 1
        print("Find Near:" + str(num_photos))
        image_name = line.strip('\n')
        image_array = get_one_image(images_dir + image_name)
        image_array = np.reshape(image_array, [1, 300, 400, 3])
        outfile2.write("'" + image_name + "': [\n")
        prediction = sess.run(eval_logits, feed_dict={xName: image_array})
        prediction = np.array(prediction, dtype='float32')

        # Euclidean distance between this image's logits and every stored one
        test_result = []
        for sample in images_tested:
            distance = float(np.sqrt(np.sum(np.square(sample[1] - prediction))))
            test_result.append([sample[0], distance])

        # Sort numerically by distance and keep the 11 nearest (the query
        # itself included); the original stringified the array and sorted
        # distances lexicographically
        test_result.sort(key=lambda item: item[1])
        for i in range(min(11, len(test_result))):
            outfile2.write("'" + test_result[i][0] + "', ")
        outfile2.write("],\n")
        outfile2.flush()

    outfile2.write("}\n")
    outfile2.close()

    sess.close()
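
The nearest-neighbour pass above recomputes distances image by image; for larger collections the same 11-nearest search can be done in one vectorized pass. A sketch in plain NumPy, assuming `images_tested` holds `[name, logits]` pairs as built above (the helper name `nearest_neighbours` is illustrative):

import numpy as np

def nearest_neighbours(images_tested, k=11):
    """Return, for each image, the names of its k nearest images by logit distance."""
    names = [name for name, _ in images_tested]
    feats = np.stack([logits.ravel() for _, logits in images_tested])  # (N, D)
    # Pairwise squared Euclidean distances via |a - b|^2 = |a|^2 - 2ab + |b|^2
    sq = np.sum(feats ** 2, axis=1)
    d2 = sq[:, None] - 2.0 * feats @ feats.T + sq[None, :]
    order = np.argsort(d2, axis=1)[:, :k]
    return {names[i]: [names[j] for j in order[i]] for i in range(len(names))}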
Example #4
sgd = SGD(lr=learning_rate, decay=1e-5)
model_vgg_train.compile(loss='categorical_crossentropy',
                        optimizer=sgd,
                        metrics=['accuracy'])


# Data generator: yields one (image, one-hot label) pair per call
def get_train_batch(X_train, Y_train, img_w, img_h):
    i = 0
    while True:
        x = []
        y = []
        image = Image.open(X_train[i])
        image = image.resize([img_w, img_h])
        x.append(np.array(image))
        y.append(to_categorical(Y_train[i], N_CLASSES))
        i = (i + 1) % len(X_train)
        x = np.array(x)
        y = np.array(y)
        yield (x, y)


train, train_label = image_P.get_files(train_dir, CAPACITY)

model_vgg_train.fit_generator(get_train_batch(train, train_label, IMG_W,
                                              IMG_H),
                              steps_per_epoch=BATCH_SIZE,
                              epochs=MAX_STEP)

model_vgg_train.save(log_dir)
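
Once `model_vgg_train.save(log_dir)` has written the model, it can be restored for inference with Keras's standard `load_model`; a minimal sketch, with the image path purely illustrative and `IMG_W`/`IMG_H` the same constants the snippet uses:

import numpy as np
from PIL import Image
from keras.models import load_model

# log_dir is the path passed to model_vgg_train.save() above
restored = load_model(log_dir)

# Preprocess one image the same way the training generator does
image = Image.open("./image/example.jpg").resize([IMG_W, IMG_H])
batch = np.array(image)[np.newaxis, ...]  # shape (1, IMG_H, IMG_W, 3)

probs = restored.predict(batch)           # per-class probabilities
print("predicted class:", int(np.argmax(probs)))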