# Esempio n. 1  (Example 1 — scraped snippet marker, commented out so the file parses)
# 0  (vote count from the source page)
def test_process(model, pre_file, data_type, epochs=20, dataset='ucf101',
    classes=101, cross_index=1, seq_len=3, batch_size=16, split_sequence=False):
    """Evaluate `model` on the pickled test split and print per-sample scores.

    Restores weights from ``weights/<pre_file>_<epochs>e_cr<cross_index>.h5``,
    predicts the test split with a data generator, dumps ``[y_pred, Y_test]``
    to ``results/`` and prints a classification report.  A second pass then
    evaluates the module-level ``result_model`` (optical-flow stream) on the
    same keys.

    Parameters
    ----------
    model : keras.Model        compiled model whose weights are restored
    pre_file : str             prefix used for weight and result file names
    data_type :                forwarded to ``gd.getTrainData`` (opaque here)
    epochs, dataset, classes, cross_index, seq_len, batch_size, split_sequence :
        run configuration forwarded to the data generator / file names.

    NOTE(review): depends on module-level names ``data_output_path``, ``gd``,
    ``result_model`` and ``opt_size`` — confirm they are defined in this file.
    """
    model.load_weights('weights/{}_{}e_cr{}.h5'.format(pre_file, epochs, cross_index))

    # Pickled list of test-sample keys produced by the preprocessing step.
    out_file = r'{}database/{}-test{}-split{}.pickle'.format(
        data_output_path, dataset, seq_len, cross_index)
    with open(out_file, 'rb') as f2:
        keys = pickle.load(f2)

    len_samples = len(keys)

    print('-' * 40)
    print('{} testing'.format(pre_file))
    # was a Python-2 `print` statement — normalized to the print() function
    print('Number samples: {}'.format(len_samples))
    print('-' * 40)

    Y_test = gd.getClassData(keys)
    steps = int(np.ceil(len_samples * 1.0 / batch_size))

    time_start = time.time()

    y_pred = model.predict_generator(
        gd.getTrainData(
            keys=keys, batch_size=batch_size, dataset=dataset, classes=classes,
            train='test', data_type=data_type, split_sequence=split_sequence),
        max_queue_size=20,
        steps=steps)

    run_time = time.time() - time_start

    # Persist raw predictions plus ground truth for offline analysis.
    with open('results/{}_{}e_cr{}.pickle'.format(pre_file, epochs, cross_index), 'wb') as fw3:
        pickle.dump([y_pred, Y_test], fw3)

    y_classes = y_pred.argmax(axis=-1)
    print(classification_report(Y_test, y_classes, digits=6))
    print('Run time: {}'.format(run_time))
    print('-' * 40)
    print('MobileNet Optical #{} stream only: Testing'.format(opt_size))
    print('-' * 40)
    print('Number samples: {}'.format(len_samples))

    # Second pass: evaluate the optical-flow-only model on the same split.
    # (Removed a commented-out evaluate_generator block that duplicated this.)
    Y_test = gd.getClassData(keys)
    y_pred = result_model.predict_generator(
        gd.getTrainData(keys, batch_size, classes, 2, 'test', opt_size),
        max_queue_size=3,
        steps=int(np.ceil(len_samples * 1.0 / batch_size)))
    y_classes = y_pred.argmax(axis=-1)
    print('Score per samples')
    print(classification_report(Y_test, y_classes, digits=6))

    print('Score per video')
    print(gd.getScorePerVideo(y_pred, keys))
# Esempio n. 3  (Example 3 — scraped snippet marker, commented out so the file parses)
# 0  (vote count from the source page)
    # NOTE(review): scraped fragment — the enclosing `def` (and the names
    # `out_file`, `opt_size`, `result_model`, `server`, `gd`, ...) are defined
    # outside this view, and the predict_generator(...) call at the bottom is
    # cut off mid-expression.  Left byte-identical; comments only.
    with open(out_file, 'rb') as f2:
        keys = pickle.load(f2)
    # else:
    #     with open(out_file,'rb') as f1:
    #         keys_cross = pickle.load(f1)
    #     keys_train, keys = gd.get_data_cross_validation(keys_cross,cross_index)

    len_samples = len(keys)

    print('-' * 40)
    # Python-2 `print` statements below — this file mixes py2/py3 print styles.
    print 'MobileNet Temporal{} stream: Testing'.format(opt_size)
    print('-' * 40)
    print 'Number samples: {}'.format(len_samples)

    # On the server evaluate the full split; locally run a quick 10-step smoke test.
    if server:
        Y_test = gd.getClassData(keys)
        steps = int(np.ceil(len_samples * 1.0 / batch_size))
    else:
        Y_test = gd.getClassData(keys, 10 * batch_size)
        steps = 10

    time_start = time.time()

    # Prediction over the temporal (optical-flow) stream; call continues
    # beyond this fragment.
    y_pred = result_model.predict_generator(gd.getTrainData(
        keys=keys,
        batch_size=batch_size,
        classes=classes,
        mode=1,
        train='test',
        opt_size=[opt_size],
        seq=True),
# Esempio n. 4  (Example 4 — scraped snippet marker, commented out so the file parses)
# 0  (vote count from the source page)
def test_process(model,
                 pre_file,
                 data_type,
                 epochs=20,
                 dataset='ucf101',
                 classes=101,
                 cross_index=1,
                 seq_len=3,
                 batch_size=1,
                 split_sequence=False):
    """Ten-crop evaluation over the 9 pre-split test-partition pickles.

    Restores the epoch-`epochs` checkpoint, predicts each of the 9
    ``...-test0<k>.pickle`` partitions, averages the 10 crop scores of every
    sample, accumulates the averages across partitions, then dumps
    ``[y_pred, Y_test]``, prints a classification report and writes the
    confusion matrix to CSV.

    NOTE(review): depends on module-level names ``data_output_path``, ``gd``,
    ``classification_report`` and ``confusion_matrix``.
    """
    weight_path = glob.glob('weights/' + pre_file +
                            '-{:02d}-*.hdf5'.format(epochs))[0]
    print(weight_path)
    model.load_weights(weight_path)

    # Accumulated scores: 3783 test videos x 101 classes (UCF101 split 1).
    # TODO(review): derive this shape from `classes`/dataset instead of hard-coding.
    y_pred = np.zeros((3783, 101))

    for part in range(9):  # renamed from `i`, which the inner loop shadowed
        out_file = r'{}database/{}-test{}-split{}-test0'.format(
            data_output_path, dataset, seq_len,
            cross_index) + str(part) + '.pickle'
        with open(out_file, 'rb') as f2:
            keys = pickle.load(f2)
        len_samples = len(keys)

        print('-' * 40)
        print('{} testing'.format(pre_file))
        print('Number samples: {}'.format(len_samples))
        print('-' * 40)

        Y_test = gd.getClassData(keys)
        steps = int(np.ceil(len_samples * 1.0 / batch_size))

        time_start = time.time()

        y_ = model.predict_generator(gd.getTrainData(
            keys=keys,
            batch_size=batch_size,
            dataset=dataset,
            classes=classes,
            train='test',
            data_type=data_type,
            split_sequence=split_sequence),
                                     max_queue_size=20,
                                     steps=steps)

        # run_time reflects only the last partition's prediction time.
        run_time = time.time() - time_start

        # Average the 10 consecutive crop scores belonging to each sample.
        # Bug fix: the original summed y_[i:i+9] (only 9 of the 10 crops)
        # while still dividing by 10.
        y_p = [np.sum(y_[j:j + 10], axis=0) * 1.0 / 10
               for j in range(0, 10 * len_samples, 10)]
        y_pred = y_pred + y_p

    with open(
            'results/{}_{}e_cr{}.pickle'.format(pre_file, epochs, cross_index),
            'wb') as fw3:
        pickle.dump([y_pred, Y_test], fw3)

    print(np.array(y_pred).shape)
    y_classes = np.array(y_pred).argmax(axis=-1)
    print(classification_report(Y_test, y_classes, digits=6))
    confusion_mtx = confusion_matrix(Y_test, y_classes)
    np.savetxt('results/{}_{}e_cr{}.csv'.format(pre_file, epochs, cross_index),
               confusion_mtx,
               delimiter=",")
    print('Run time: {}'.format(run_time))