Example 1
def main_function():
    # Set up the skeleton detector; the two inputs are the operation model and the image size
    Skeleton_Detector = uti_openpose.Skeleton_Detector(OPENPOSE_MODEL,
                                                       OPENPOSE_IMAGE_SIZE)
    # track_and_label = Skeleton_Tracker()
    Images_Loader = uti_commons.Read_Valid_Images(
        images_folder=SRC_IMAGES_FOLDER,
        valid_images_txt=SRC_IMAGES_DESCRIPTION_TXT,
        image_filename_format=IMAGE_FILE_NAME_FORMAT)

    # Set up the image displayer
    Images_Displayer = uti_images_io.Image_Displayer()

    # Create the output folders if they have not been created yet
    os.makedirs(DST_DETECTED_SKELETONS_FOLDER, exist_ok=True)
    os.makedirs(DST_IMAGES_WITH_DETECTED_SKELETONS, exist_ok=True)
    # Create a counter and a list to store the names of possibly invalid images
    iInvalid_Counter = 0
    list_of_invalid = []

    iTotal_Number_of_Images = Images_Loader._num_images
    for iImages_Counter in range(iTotal_Number_of_Images):
        # Load training images
        Image, sImage_Info = Images_Loader.read_image()
        # detect humans
        Humans = Skeleton_Detector.detect(Image)

        # display detected skeletons on images
        Image_DST = Image.copy()
        Skeleton_Detector.draw(Image_DST, Humans)
        Images_Displayer.display(Image_DST)

        # save the skeleton coordinates to txt files, with sImage_Info prepended at the beginning
        SKELETONS, SCALE_H = Skeleton_Detector.humans_to_skeletons_list(Humans)
        # SKELETONS_DICT = track_and_label.track(SKELETONS)

        SKELETONS_DIR = uti_tracker.delete_invalid_skeletons_from_lists(
            SKELETONS)
        # add the image info to this skeleton list
        SKELETONS_DIR.insert(IMAGES_INFO_INDEX, sImage_Info)

        sFile_Name = SKELETON_FILE_NAME_FORMAT.format(iImages_Counter)
        uti_commons.save_listlist(DST_DETECTED_SKELETONS_FOLDER + sFile_Name,
                                  SKELETONS_DIR)

        sImage_Name = IMAGE_FILE_NAME_FORMAT.format(iImages_Counter)
        cv2.imwrite(DST_IMAGES_WITH_DETECTED_SKELETONS + sImage_Name,
                    Image_DST)
        # The length of this list should be 2 at this point; if not, the image is invalid
        if len(SKELETONS_DIR) != 2:
            iInvalid_Counter += 1
            list_of_invalid.append(sImage_Name)
        uti_commons.save_listlist(INVALID_IMAGES_FILE, list_of_invalid)

        print(f'{iImages_Counter}/{iTotal_Number_of_Images}th image '
              f'has {len(SKELETONS_DIR) - 1} people in it')
    print(f'Program ended, found {iInvalid_Counter} invalid images')
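Note: the examples on this page persist skeleton data through uti_commons.save_listlist, whose implementation is not shown here. Below is a minimal sketch of what such a helper could look like, assuming it writes one inner list per line as space-separated values; the signature and file format are assumptions, not the repository's actual code.

import os

def save_listlist(filepath, listlist):
    # Hypothetical sketch: write a list of lists to a text file,
    # one inner list per line, values separated by spaces.
    # The real uti_commons.save_listlist may differ.
    folder = os.path.dirname(filepath)
    if folder:
        os.makedirs(folder, exist_ok=True)
    with open(filepath, 'w') as f:
        for row in listlist:
            f.write(' '.join(str(value) for value in row) + '\n')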
Example 2
def Test_Save_Raw_Skeleton_Data_v1():
    '''Try to extract and display the skeleton data and save it in txt files.'''
    fMax_Framerate = 10
    iCamera_Index = 0
    Data_Source = uti_images_io.Read_Images_From_Webcam(fMax_Framerate, iCamera_Index)
    Image_Window = uti_images_io.Image_Displayer()
    Skeleton_Detector = uti_openpose.Skeleton_Detector("mobilenet_thin", "432x368")
    # Temporary paths and formats; define them in a config.yaml file later.
    SKELETON_X_FOLDER = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Skeletons/Test_Skeleton_X/'
    SKELETON_Y_FOLDER = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Skeletons/Test_Skeleton_Y/'
    SKELETON_X_FOLDER_DIR = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Skeletons/Test_Skeleton_X_DIR/'
    SKELETON_Y_FOLDER_DIR = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Skeletons/Test_Skeleton_Y_DIR/'
    SKELETON_X_FOLDER_VEL = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Skeletons/Test_Skeleton_X_VEL/'
    SKELETON_Y_FOLDER_VEL = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Skeletons/Test_Skeleton_Y_VEL/'
    DST_VIZ_IMGS_FOLDER = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Images/Test_Images/'
    DST_IMGS_FOLDER = '/home/zhaj/tf_test/Human_Action_Recognition/Data_Images/Test_Images_ORI/'
    SKELETON_FILENAME_FORMAT = '{:05d}.txt'
    IMG_FILENAME_FORMAT = '{:05d}.jpg'
    # Create the output folders if they do not exist yet
    for sFolder in [SKELETON_X_FOLDER, SKELETON_Y_FOLDER,
                    SKELETON_X_FOLDER_DIR, SKELETON_Y_FOLDER_DIR,
                    SKELETON_X_FOLDER_VEL, SKELETON_Y_FOLDER_VEL,
                    DST_VIZ_IMGS_FOLDER, DST_IMGS_FOLDER]:
        os.makedirs(sFolder, exist_ok=True)
    import itertools
    for i in itertools.count():
        img = Data_Source.Read_Image()
        if img is None:
            break
        print(f"Read {i}th Frame from Video...")

        Detected_Human = Skeleton_Detector.detect(img)
        Image_Output = img.copy()
        Skeleton_Detector.draw(Image_Output, Detected_Human)
        Image_Window.display(Image_Output)
        Skeleton_X, Skeleton_Y, Scale_h = Skeleton_Detector.humans_to_skels_list(Detected_Human)

        if Skeleton_X and Skeleton_Y:  # only save non-empty lists
            txt_filename = SKELETON_FILENAME_FORMAT.format(i)
            uti_commons.save_listlist(SKELETON_X_FOLDER + txt_filename, Skeleton_X)
            uti_commons.save_listlist(SKELETON_Y_FOLDER + txt_filename, Skeleton_Y)
            Skeleton_X_DIR = uti_skeletons_io.Rebuild_Skeletons(Skeleton_X)
            Skeleton_Y_DIR = uti_skeletons_io.Rebuild_Skeletons(Skeleton_Y)
            uti_commons.save_listlist(SKELETON_X_FOLDER_DIR + txt_filename, Skeleton_X_DIR)
            uti_commons.save_listlist(SKELETON_Y_FOLDER_DIR + txt_filename, Skeleton_Y_DIR)

            print(f"Saved {i}th Skeleton Data from Webcam...")
            jpg_filename = IMG_FILENAME_FORMAT.format(i)
            cv2.imwrite(DST_IMGS_FOLDER + jpg_filename, img)
            cv2.imwrite(DST_VIZ_IMGS_FOLDER + jpg_filename, Image_Output)
            print(f"Saved {i}th Image with Skeleton Data from Webcam...")


    print("Program ends")
Example 3
def main_function():

    # initialize the frames counter at -1, so the first incoming frame is 0
    iFrames_Counter = -1

    # initialize the skeleton detector
    skeleton_detector = uti_openpose.Skeleton_Detector(OPENPOSE_MODEL,
                                                       OPENPOSE_IMAGE_SIZE)

    # load the trained two stream model
    network = tf.keras.models.load_model(MODEL_PATH)

    # select the data source
    # images_loader = uti_images_io.Read_Images_From_Video(VIDEO_PATH_SRC)
    # images_loader = uti_images_io.Read_Images_From_Webcam(10, 0)
    images_loader = uti_images_io.Read_Images_From_Folder(IMAGE_PATH)
    # initialize the image displayer
    Images_Displayer = uti_images_io.Image_Displayer()

    # initialize the features generator
    Features_Generator = uti_features_extraction.Features_Generator_Multiple(
        FEATURE_WINDOW_SIZE)

    # initialize Multiperson Tracker
    Local_Tracker = uti_tracker.Tracker()

    # Recorder = uti_images_io.Video_Writer(TEST_OUTPUTS + 'TEST_'  + uti_commons.get_time() + '/video', 10)
    Timestamp = uti_commons.get_time()

    TEST_RESULTS_FOLDER = TEST_OUTPUTS + 'TEST_Webcam' + Timestamp + '/scores/'
    TEST_SKELETONS_FOLDER = TEST_OUTPUTS + 'TEST_Webcam' + Timestamp + '/skeletons/'
    TEST_IMAGES_FOLDER = TEST_OUTPUTS + 'TEST_Webcam' + Timestamp + '/images/'

    # Create all three output folders if they have not been created yet;
    # skeletons and scores are written below, not only images
    os.makedirs(TEST_RESULTS_FOLDER, exist_ok=True)
    os.makedirs(TEST_SKELETONS_FOLDER, exist_ok=True)
    os.makedirs(TEST_IMAGES_FOLDER, exist_ok=True)
    #################################################################################################
    # Will always be true if the webcam is plugged in
    while images_loader.Image_Captured():

        # increment the frames counter by 1
        iFrames_Counter += 1

        # grab frames from data source
        image_src = images_loader.Read_Image()

        image_display = image_src.copy()

        # get detected human(s) from openpose
        humans = skeleton_detector.detect(image_src)

        # convert human(s) to 2d coordinates in a list(of lists)
        skeletons_lists_src, scale_h = skeleton_detector.humans_to_skeletons_list(
            humans)

        # delete invalid skeletons from lists
        skeletons_lists = uti_tracker.delete_invalid_skeletons_from_lists(
            skeletons_lists_src)

        sText_Name = SKELETON_FILE_NAME_FORMAT.format(iFrames_Counter)

        sImage_Name = IMAGE_FILE_NAME_FORMAT.format(iFrames_Counter)

        uti_commons.save_listlist(TEST_SKELETONS_FOLDER + sText_Name,
                                  skeletons_lists_src)

        skeleton_detector.draw(image_display, humans)

        # sort and track humans in frames
        skeletons_dict = Local_Tracker.track(skeletons_lists)

        if len(skeletons_dict) >= 1:
            # get human ids and skeletons separately
            human_ids, skeletons_tracked_lists = map(
                list, zip(*skeletons_dict.items()))

            skeletons_tracked_lists = uti_features_extraction.rebuild_skeleton_joint_order(
                skeletons_tracked_lists)

            # uti_images_io.draw_bounding_box_for_multiple_person_on_image(image_display, skeletons_tracked_lists, scale_h)

            status_list, features_p, features_v = Features_Generator.calculate_features_multiple(
                human_ids, skeletons_tracked_lists)

            result_dict = predict_action_class(human_ids, status_list,
                                               features_p, features_v, network)

            if len(result_dict) > 0:

                # get the scores of the first tracked person
                first_value = next(iter(result_dict.values()))

                # result_str = str(result_dict)

                # np.savetxt(TEST_RESULTS_FOLDER + sText_Name, result_dict)
                uti_commons.save_result_dict(TEST_RESULTS_FOLDER + sText_Name,
                                             result_dict)

                # only draw all the scores of the first prediction on image
                uti_images_io.draw_scores_for_one_person_on_image(
                    image_display, first_value)

            uti_images_io.draw_result_images(image_display, human_ids,
                                             skeletons_tracked_lists,
                                             result_dict, scale_h,
                                             ACTION_CLASSES)

        cv2.imwrite(TEST_IMAGES_FOLDER + sImage_Name, image_display)
        Images_Displayer.display(image_display)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Remember to kill the thread, or you can't quit this function properly
    images_loader.Stop()

    print('Finished')
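Note: Example 3 calls predict_action_class, which is defined elsewhere in the repository. Below is a minimal sketch of what it plausibly does, assuming the two-stream network takes position and velocity feature windows and returns one score vector per tracked person; the input arrangement and the helper body are assumptions, not the repository's actual code.

import numpy as np

def predict_action_class(human_ids, status_list, features_p, features_v, network):
    # Hypothetical sketch: run the network for every tracked person whose
    # feature window is complete, and map the human id to the class scores.
    # The real helper (and the network's exact input layout) may differ.
    result_dict = {}
    for human_id, is_ready, feature_p, feature_v in zip(
            human_ids, status_list, features_p, features_v):
        if not is_ready:  # the feature window is not filled yet
            continue
        feature_p = np.expand_dims(feature_p, axis=0)  # add batch dimension
        feature_v = np.expand_dims(feature_v, axis=0)
        scores = network.predict([feature_p, feature_v])[0]
        result_dict[human_id] = scores
    return result_dict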
Example 4
def train_model_on_batch_v1(network):
    # Optimizer setups: Adam uses the original hyperparameters from the paper;
    # SGD and RMSprop are kept as unused alternatives.
    adam = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    SGD = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.0, nesterov=False)

    RMS = tf.keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-07)

    # network.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
    
    network.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

    network.summary()

    # network.load_weights(weight_path)

    # tf.keras.utils.plot_model(network, to_file= FIGURE_PATH + 'Model.png')

    batch_num = 0
    model_save_acc = 0
    all_train_accuracy = []
    all_train_loss = []
    all_tst_accuracy = []
    all_tst_loss = []

    # load the datasets into memory (temporary solution)
    train_position, train_velocity, train_labels = load_train_datasets(FEATURES_TRAIN)
    test_position, test_velocity, test_labels = load_test_datasets(FEATURES_TEST)

    train_data = uti_data_generator.Data_Generator(FEATURES_TRAIN, BATCH_SIZE)
    train_data_sum = train_data.get_train_data_sum()
    train_data_index = np.arange(0, train_data_sum)
    train_data_cursors = train_data.batch_cursors(train_data_sum)
    index_num = len(train_data_cursors)

    # test_position = np.expand_dims(test_position, axis=0)
    # test_velocity = np.expand_dims(test_velocity, axis=0)
    test_data = uti_data_generator.Data_Generator(FEATURES_TEST, BATCH_SIZE)
    test_data_sum = test_data.get_test_data_sum()
    test_data_index = np.arange(0, test_data_sum)
    test_data_cursors = test_data.batch_cursors(test_data_sum)
    test_index_num = len(test_data_cursors)

    for epoch in range(EPOCHS):
        accuracy_list = []
        loss_list = []
        test_accuracy_list = []
        test_loss_list = []
        print('Epoch {} is beginning......'.format(epoch + 1))
        for ind in range(index_num):
            batch_num += 1
            # generate the same batch twice, since the four-input network
            # expects two copies of each feature stream
            up_data_0, down_data_0, train_labels_0 \
                = train_data.generate_batch_data_v1(train_data_index, train_data_cursors[ind], train_position, train_velocity, train_labels)
            up_data_1, down_data_1, train_labels_1 \
                = train_data.generate_batch_data_v1(train_data_index, train_data_cursors[ind], train_position, train_velocity, train_labels)
            train_loss = network.train_on_batch([up_data_0, up_data_1, down_data_0, down_data_1], train_labels_0)
            accuracy_list.append(train_loss[1])
            loss_list.append(train_loss[0])
            if batch_num % 50 == 0:
                print('Batch {:03d}: loss: {:.3f}  accuracy: {:.3%}'.format(batch_num, train_loss[0], train_loss[1]))

        epoch_accuracy = sum(accuracy_list) / len(accuracy_list)
        epoch_loss = sum(loss_list) / len(loss_list)
        all_train_accuracy.append(epoch_accuracy)
        all_train_loss.append(epoch_loss)

        print('Epoch {:03d}: mean loss: {:.3f}    mean accuracy: {:.3%}'.format(epoch + 1, epoch_loss, epoch_accuracy))

        # evaluate on the test set from the second epoch onwards
        if epoch >= 1:
            tst_accuracy_list = []
            tst_loss_list = []
            for num in range(test_index_num):
                tst_up_0, tst_down_0, tst_labels_0 \
                    = test_data.get_test_batch(test_data_index, test_data_cursors[num], test_position, test_velocity, test_labels)
                tst_up_1, tst_down_1, tst_labels_1 \
                    = test_data.get_test_batch(test_data_index, test_data_cursors[num], test_position, test_velocity, test_labels)
                tst_loss = network.test_on_batch([tst_up_0, tst_up_1, tst_down_0, tst_down_1], tst_labels_0)

                # test_result = network.evaluate([tst_up_0, tst_up_1, tst_down_0, tst_down_1], tst_labels_0)

                tst_loss_list.append(tst_loss[0])
                tst_accuracy_list.append(tst_loss[1])
            tst_accuracy = sum(tst_accuracy_list) / len(tst_accuracy_list)
            tst_loss_output = sum(tst_loss_list) / len(tst_loss_list)

            all_tst_accuracy.append(tst_accuracy)
            all_tst_loss.append(tst_loss_output)
            print('The test data accuracy: {:.3%}'.format(tst_accuracy))
            # save the model whenever the test accuracy improves
            if tst_accuracy > model_save_acc:
                network.save(MODEL_PATH)
                model_save_acc = tst_accuracy
                print('Model Saved')

    uti_commons.save_listlist(TXT_FILE_PATH + 'all_train_loss.txt', all_train_loss)
    uti_commons.save_listlist(TXT_FILE_PATH + 'all_train_acc.txt', all_train_accuracy)
    uti_commons.save_listlist(TXT_FILE_PATH + 'all_test_loss.txt', all_tst_loss)
    uti_commons.save_listlist(TXT_FILE_PATH + 'all_test_acc.txt', all_tst_accuracy)
    
    fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
    fig.suptitle('Training Metrics')

    axes[0].set_ylabel('Loss', fontsize=14)
    axes[0].plot(all_train_loss)

    axes[1].set_ylabel('Accuracy', fontsize=14)
    axes[1].set_xlabel('Epoch', fontsize=14)
    axes[1].plot(all_train_accuracy)
    plt.savefig(FIGURE_PATH + '35frame_train_cc.png')

    fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
    fig.suptitle('Test Metrics')

    axes[0].set_ylabel('Loss', fontsize=14)
    axes[0].plot(all_tst_loss)

    axes[1].set_ylabel('Accuracy', fontsize=14)
    axes[1].set_xlabel('Epoch', fontsize=14)
    axes[1].plot(all_tst_accuracy)
    plt.savefig(FIGURE_PATH + '35frame_test_cc.png')
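Note: the manual save-best-model logic in Example 4 (network.save whenever the test accuracy improves) could also be delegated to a Keras callback when training with model.fit instead of train_on_batch. A short sketch follows; the file path is an assumption.

import tensorflow as tf

# Hypothetical alternative: let ModelCheckpoint keep only the best weights,
# monitored on validation accuracy, instead of saving manually in the loop.
checkpoint = tf.keras.callbacks.ModelCheckpoint(
    filepath='best_model.h5',  # assumed path
    monitor='val_accuracy',
    save_best_only=True,
    verbose=1)
# used as: network.fit(..., validation_data=..., callbacks=[checkpoint])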