def predict_task():
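    """Prediction worker.

    Waits for commands on data_queue; for each command it downloads the
    captured frames, runs the preprocessing steps, extracts features with
    the pretrained c3d_biclstm network, and either registers a new gesture
    class or matches the gesture against the already registered classes.
    """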
    num_files = 1
    flag1 = True
    # print 'hhhhhh'

    seq_len = 32
    batch_size = 1
    num_classes = 19

    model_prefix = './'

    x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='x')
    y = tf.placeholder(tf.int32, shape=[batch_size, ], name='y')
    sess = tf.InteractiveSession()
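    # Build the c3d_biclstm graph for feature extraction and restore the
    # pretrained weights from the .npz checkpoint below.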
    feature, networks = net.c3d_biclstm(x, num_classes, False, False)
    feature = feature.outputs
    sess.run(tf.global_variables_initializer())
    load_params = tl.files.load_npz(name='%s/beihang_dataset_birnn_model_epoch_10.npz' % (model_prefix))
    tl.files.assign_params(sess, load_params, networks)

    print("restore done")

    while True:
        if not data_queue.empty():
            command = data_queue.get()
            # print 'The output queue size: {}'.format(data_queue.qsize())

            if command == "quit":
                break
            else:
                file_dir = '/home/e829/Documents/lxj/1128/data/' + 'train_' + str(num_files) + '/'
                if not os.path.isdir(file_dir):
                    os.mkdir(file_dir)
                save_file = os.path.join(os.getcwd(), 'data/train_' + str(num_files) + '/')
                url_path = 'http://192.168.1.104/train_' + str(num_files) + '/'

                nums = int(command[8:])  # number of frames encoded after the command's 8-character prefix
                for i in range(nums):
                    url = url_path + str(i + 1) + '.png'
                    path_name = save_file + str(i + 1) + '.png'
                    # download this frame from the acquisition server
                    download(url, path_name)

                # Data preprocessing
                current_path = os.getcwd()
                nni_path = os.path.join(current_path, 'data/')
                os.chdir(nni_path)
                list_nni = os.listdir(nni_path)
                list_nni.sort(key=lambda name: int(name[6:]))  # sort the 'train_N' directories numerically
                file_nni = list_nni[-2]

                img_path = os.path.join(nni_path, file_nni)

                starttime3 = time.clock()
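                # NNI() and rename_lxj() come from the surrounding project; presumably
                # they resample the downloaded frames to the fixed sequence length and
                # normalize the file names before feature extraction.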
                NNI(img_path)
                rename_lxj(img_path)
                os.chdir(current_path)

                with open("./train1.txt", "w") as f:

                    f.write(img_path+'/'+' '+str(32)+ ' '+str(0)+'\n')
                time_preprocess_done = time.clock()
                print ("the NNI time is %f" % (time_preprocess_done - starttime3))

                #lxj

                if flag1:
                    # print("please perform a new gesture!")
                    print("This may be a gesture,do you want to register a new class?")
                    print ("please input:[y/n]")
                    starttime4 = time.clock()
                    data_show_queue.put((-1, img_path,True))
                    last_label = -1
                    while True:
                        # print "while"
                        if not data_rec.empty():
                            input_data = data_rec.get()
                            if input_data == 'y':
                                new_class_feature = data_input('./train1.txt', networks, feature, x, y, sess)
                                # print("Register completed")
                                flag1 = False
                                print "Congratulations!Register a new class successfully!"
                                break
                            elif input_data == 'n':
                                # new_class_feature = new_class_feature
                                print("Please continue!")
                                break
                            else:
                                print("Invalid operation!")
                                break
                        endtime4 = time.clock()
                        # print "time"
                        if data_rec.empty() and (endtime4 - starttime4) > 10.0:
                            print("Time is up! Please continue!")
                            break
                        # break
                        # else:
                        #     print "ok"

                    # new_class_feature=data_input('./train1.txt',networks,feature,x,y,sess)
                    # print("Register completed")
                    # flag1 = False

                else:
                    # print new_class_feature
                    test_features = data_input('./train1.txt', networks, feature, x, y, sess)
                    feature_extraction_done = time.clock()
                    print ("the preprocess and feature extraction time is %f" % (feature_extraction_done - time_preprocess_done))

                    distances = []
                    for i in range(len(new_class_feature)):
                        distances.append(distance_encludian(new_class_feature[i], test_features))
                    if min(distances) >= 0.423:
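                        # No registered class is close enough (distance >= 0.423), so the
                        # gesture is treated as unknown and the user is offered the chance
                        # to register it as a new class.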
                        print("The test gesture does not belong to any class,do you want to register a new class?")
                        print ("please input:[y/n]")
                        # cv.destroyAllWindows()
                        starttime5 = time.clock()
                        # data_show_queue.put((-1, img_path))
                        if last_label != -1:
                            data_show_queue.put((-1, img_path, True))
                        else:
                            data_show_queue.put((-1, img_path, False))
                        last_label = -1
                        while True:
                            if not data_rec.empty():
                                input_data = data_rec.get()
                                if input_data == 'y':
                                    tmp = np.array(test_features)
                                    new_class_feature.append(tmp)
                                    print "Congratulations!Register a new class successfully!"
                                    break
                                elif input_data == 'n':
                                    print("Please continue your test!")
                                    break
                                else:
                                    print("Invalid operation!")
                                    break
                            endtime5 = time.clock()
                            if data_rec.empty() and (endtime5 - starttime5) > 10.0:
                                print "Time is up! Please continue!"
                                break

                    else:
                        judge_distance_done = time.clock()
                        print ("the judge distance time is %f" % (judge_distance_done - feature_extraction_done))

                        pre_label = distances.index(min(distances))
                        print("The test gesture belongs to class %d" % (pre_label+1))
                        if last_label == -1:
                            data_show_queue.put((pre_label, img_path, True))
                        else:
                            data_show_queue.put((pre_label, img_path, False))
                        last_label = pre_label
                        endtime3 = time.clock()
                        print ("the predict time is %f" % (endtime3 - judge_distance_done))
                        print "total predict classes time: ", (endtime3 - starttime3)
                        print "-----------------------------------------------------------"

                num_files += 1
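
distance_encludian() and data_input() are helpers defined elsewhere in this project and are not shown here. As a rough sketch, assuming distance_encludian() simply returns the Euclidean (L2) distance between two flattened feature vectors, it could look like the following (hypothetical implementation, not the project's own):

import numpy as np

def distance_encludian(feat_a, feat_b):
    # Hypothetical sketch: Euclidean (L2) distance between two flattened
    # feature vectors; the project's real helper may normalize differently.
    a = np.asarray(feat_a, dtype=np.float32).ravel()
    b = np.asarray(feat_b, dtype=np.float32).ravel()
    return float(np.sqrt(np.sum((a - b) ** 2)))
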
Example #2

curtime = '%s' % datetime.now()
d = curtime.split(' ')[0]
t = curtime.split(' ')[1]
strtime = '%s%s%s-%s%s%s' % (d.split('-')[0], d.split('-')[1], d.split('-')[2],
                             t.split(':')[0], t.split(':')[1], t.split(':')[2])
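# The block below captures console output in an in-memory buffer and opens a
# timestamped log file for this training run.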

saved_stdout = sys.stdout
mem_log = cStringIO.StringIO()
sys.stdout = mem_log
logfile = './log/training_%s_%s.log' %(dataset_name, strtime)
log = open(logfile, 'w')

sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='x')
y = tf.placeholder(tf.int32, shape=[batch_size, ], name='y')
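# Two copies of the same network are built: one for training and one for
# evaluation; the boolean arguments to c3d_biclstm presumably select variable
# reuse and training mode.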
  
_, networks = net.c3d_biclstm(x, num_classes, False, True)
networks_y = networks.outputs
networks_y_op = tf.argmax(tf.nn.softmax(networks_y), 1)
networks_cost = tl.cost.cross_entropy(networks_y, y)
correct_pred = tf.equal(tf.cast(networks_y_op, tf.int32), y)
networks_accu = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    
_, predictions = net.c3d_biclstm(x, num_classes, True, False)
prediction_y_op = tf.argmax(tf.nn.softmax(predictions.outputs), 1)
prediction_accu = tf.reduce_mean(tf.cast(tf.equal(tf.cast(prediction_y_op, tf.int32), y), tf.float32))
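# The l2_cost expression below applies weight decay to a hand-picked subset of
# the network's parameter tensors (indexed via networks.all_params).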
  
l2_cost = tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[0]) + \
          tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[6]) + \
          tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[12]) + \
          tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[14]) + \
          tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[20]) + \
Example #3

model_prefix = './'

curtime = '%s' % datetime.now()
d = curtime.split(' ')[0]
t = curtime.split(' ')[1]
strtime = '%s%s%s-%s%s%s' % (d.split('-')[0], d.split('-')[1], d.split('-')[2],
                             t.split(':')[0], t.split(':')[1], t.split(':')[2])

x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='x')
y = tf.placeholder(tf.int32, shape=[batch_size, ], name='y')

sess = tf.InteractiveSession()

_, networks = net.c3d_biclstm(x, num_classes, False, False)
network_pred = tf.nn.softmax(networks.outputs)
network_y_op = tf.argmax(tf.nn.softmax(networks.outputs), 1)
network_accu = tf.reduce_mean(
    tf.cast(tf.equal(tf.cast(network_y_op, tf.int32), y), tf.float32))
networks_cost = tl.cost.cross_entropy(networks.outputs, y)

sess.run(tf.global_variables_initializer())

# RGB
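# The pretrained RGB model is evaluated on the validation list; rgb_prediction
# is allocated to hold one row of class scores per test video.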
testing_datalist = './dataset_splits/valid_rgb_list.txt'
X_test, y_test = data.load_video_list(testing_datalist)
X_teidx = np.asarray(np.arange(0, len(y_test)), dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
rgb_prediction = np.zeros((len(y_test), num_classes), dtype=np.float32)
load_params = tl.files.load_npz(name='%s/isogr_rgb_birnn_model_epoch_10.npz' % (model_prefix))
t = curtime.split(' ')[1]
strtime = '%s%s%s-%s%s%s' %(d.split('-')[0],d.split('-')[1],d.split('-')[2],
                            t.split(':')[0],t.split(':')[1],t.split(':')[2])

saved_stdout = sys.stdout
mem_log = cStringIO.StringIO()
sys.stdout = mem_log
logfile = './log/training_%s_%s.log' %(dataset_name, strtime)
log = open(logfile, 'w')

"""size of pictures"""
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='x')
y = tf.placeholder(tf.int32, shape=[batch_size, ], name='y')
# training graph
_, networks = net.c3d_biclstm(x, num_classes, False, True)
networks_y = networks.outputs
networks_y_op = tf.argmax(tf.nn.softmax(networks_y), 1)
networks_cost = tl.cost.cross_entropy(networks_y, y,name='cost')
correct_pred = tf.equal(tf.cast(networks_y_op, tf.int32), y)
networks_accu = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
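# export training accuracy as a TensorBoard scalar summary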
tf.summary.scalar('tr_accuracy', networks_accu)
# test graph
_, predictions = net.c3d_biclstm(x, num_classes, True, False)
prediction_y_op = tf.argmax(tf.nn.softmax(predictions.outputs), 1)
prediction_accu = tf.reduce_mean(tf.cast(tf.equal(tf.cast(prediction_y_op, tf.int32), y), tf.float32))
tf.summary.scalar('te_accuracy', prediction_accu)

l2_cost = tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[0]) + \
          tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[6]) + \
          tf.contrib.layers.l2_regularizer(weight_decay)(networks.all_params[12]) + \
num_classes = 19
dataset_name = 'beihang_dataset'
model_prefix = './'

curtime = '%s' % datetime.now()
d = curtime.split(' ')[0]
t = curtime.split(' ')[1]
strtime = '%s%s%s-%s%s%s' % (d.split('-')[0], d.split('-')[1], d.split('-')[2],
                             t.split(':')[0], t.split(':')[1], t.split(':')[2])

x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='x')
y = tf.placeholder(tf.int32, shape=[batch_size, ], name='y')

sess = tf.InteractiveSession()

feature, networks = net.c3d_biclstm(x, num_classes, False, False)
feature = feature.outputs
network_pred = tf.nn.softmax(networks.outputs)
network_y_op = tf.argmax(tf.nn.softmax(networks.outputs), 1)
pre_label = tf.cast(network_y_op, tf.int32)
network_accu = tf.reduce_mean(tf.cast(tf.equal(tf.cast(network_y_op, tf.int32), y), tf.float32))

sess.run(tf.global_variables_initializer())

# Depth
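# Feature extraction setup for the depth test list; features, labels and
# true_labels will collect the per-sample results.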
testing_datalist = './dataset_splits/test_samples.txt'
features = []
labels = []
true_labels = []
X_test, y_test = data.load_video_list(testing_datalist)
X_teidx = np.asarray(np.arange(0, len(y_test)), dtype=np.int32)