Example #1
def extract_pred_data(model, data_split):
    #Paths to the features for the given data split
    feat_path = glob.glob(os.path.sep.join([base_feats_dir, data_split, "*tfrecord"]))
    
    #Define the dataset; the number of time steps is padded up to the largest
    #step count in the whole set (457) so that graph construction gets an
    #optimized implementation
    #Dataset output structure: video_id, num_images, num_labels, image_seq, labels
    dataset = build_test_pipeline(feat_path, example2video(feature_dim=FEATURE_DIM, training=False),
                                  batch_size=BATCH_SIZE, padded_batch=True, 
                                  padded_shapes=([], [], [], [457, None], [457]), 
                                  padding_values=("", 0, 0, 0.,0))
    
    #Extract the sequence lengths without padding
    y_lens = tf.concat([data_point[1] for data_point in dataset.as_numpy_iterator()], axis=0).numpy()
    
    #Label probabilities at each time step
    y_probs = tf.concat([model.predict(data_point[3]) for data_point in dataset.as_numpy_iterator()], axis=0)
   
    #Confidence of the most likely label
    y_conf = tf.reduce_max(y_probs, axis=-1).numpy()
    
    #Predicted labels
    y_pred = tf.argmax(y_probs, axis=-1, output_type=tf.int32).numpy()
    
    #Ground-truth labels
    y_true = tf.concat([data_point[4] for data_point in dataset.as_numpy_iterator()], axis=0).numpy()
    
    return y_lens, y_conf, y_pred, y_true
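
#Hypothetical usage sketch (the trained model and the split name are
#assumptions, not part of the original): frame-level accuracy computed
#only over the unpadded time steps reported in y_lens.
y_lens, y_conf, y_pred, y_true = extract_pred_data(model, "test")
correct = sum(int((p[:l] == t[:l]).sum())
              for p, t, l in zip(y_pred, y_true, y_lens))
print("Frame accuracy (padding excluded):", correct / int(y_lens.sum()))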
Example #2
import tensorflow as tf
from phd_lib.data_pipeline.pipe_builders import build_test_pipeline
from phd_lib.data_pipeline.tfrecord_helpers import example2video
from phd_lib.models.test_models import SegmentationLoss_v2
from phd_lib.models.ms_tcn_model import build_ms_tcn_model
from tensorflow.keras.utils import to_categorical
import os
import glob

#%%Data
#Path to the observations
path = r"D:\Phd_data\Video_feats_tfrecords\feat_extraction\HE\train"
train_tfs = glob.glob(os.path.join(path, "*.tfrecord"))

#%%Two batches with two observations per batch
ds_no_pad = build_test_pipeline(train_tfs, example2video(2048), 2).take(2)
ds_padded = build_test_pipeline(train_tfs,
                                example2video(2048),
                                2,
                                padded_shapes=([457, None], [457])).take(2)

#Pull one element from each variant (the loops keep only the last batch)
for element in ds_no_pad.take(2):
    pred_1 = element[0]
    y_1 = element[1]
for element in ds_padded.take(2):
    pred_2 = element[0]
    y_2 = element[1]

y_1_oh = to_categorical(y_1)
y_2_oh = to_categorical(y_2)
#%%Build 2 models, with and without masking, WITH A SINGLE OUTPUT, and compile them
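
#A hypothetical sketch of the two single-output models the comment above
#asks for (NUM_CLASSES and the layer sizes are illustrative assumptions):
#one model masks the padded time steps, the other does not.
NUM_CLASSES = y_1_oh.shape[-1]

def make_model(masked):
    inp = tf.keras.Input(shape=(None, 2048))
    x = tf.keras.layers.Masking(mask_value=0.)(inp) if masked else inp
    x = tf.keras.layers.LSTM(64, return_sequences=True)(x)
    out = tf.keras.layers.Dense(NUM_CLASSES, activation="softmax")(x)
    model = tf.keras.Model(inp, out)
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    return model

model_no_mask = make_model(masked=False)
model_masked = make_model(masked=True)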
Example #3
def get_feature_files(feat_type, kadar, data_split):
    ulazni_dir = feat_type_dict[feat_type]
    filenames = glob.glob(os.path.join(ulazni_dir,
                                       kadar,
                                       data_split,
                                       "*.tfrecord"), recursive=True)
    return filenames

#Location of the input data - training has to be run for both the HE and Fokus views
train_filenames = get_feature_files(FEAT_TYPE, KADAR, "train")
val_filenames = get_feature_files(FEAT_TYPE, KADAR, "val")

#Generate the train and val datasets; the feature dimension is 4096 for the concatenated views and 2048 for the others
feature_dim = {"HE": 2048, "Fokus": 2048, "concat": 4096}

#Both datasets are padded so that a batch size greater than 1 can be used
train_dataset = build_train_pipeline(train_filenames, 
                                     example2video(feature_dim=feature_dim[KADAR], training=True), 
                                     batch_size=BATCH_SIZE, padded_batch=True)

val_dataset = build_train_pipeline(val_filenames, 
                                   example2video(feature_dim=feature_dim[KADAR], training=True), 
                                   batch_size=BATCH_SIZE, padded_batch=True)
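
#A minimal, hypothetical sketch (not part of the original pipeline) of what
#padded batching does underneath with plain tf.data; the toy sequences and
#shapes are illustrative assumptions.
toy = tf.data.Dataset.from_generator(
    lambda: iter([tf.ones([3, 4]), tf.ones([5, 4])]),
    output_signature=tf.TensorSpec(shape=[None, 4], dtype=tf.float32))
for b in toy.padded_batch(2, padded_shapes=[None, 4]):
    print(b.shape)  #(2, 5, 4): the shorter sequence is zero-padded to 5 steps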

#%%Define the model for training
print("[INFO] preparing the model")
tf.keras.backend.clear_session()
#Model components
if MODEL_TYPE == "CONV":
    model = build_ms_tcn_model(input_shape=(None, feature_dim[KADAR]), num_layers_PG=5,
                               R_stages=1, num_layers_R=5, filters=64, training=True,
                               dropout_rate=0.5, shared_R=False)
else:
    model = build_lstm_model(input_shape=(None, feature_dim[KADAR]), mask_value=0., bidirect=False, num_lstm_layers=1, 
                             lstm_units=256, lstm_dropout=0.5, lstm_recurrent_dropout=0., 
Example #4
                  "\n",
                  "MAD vremena trajanja aktivnosti:\n",
                  f"MAD: {MAD}\n",
                  "\n"
                  "Segmentacijski F1 rezultat uz minimalan prag preklapanja od 15, 25 i 50%\n", 
                  f"F1@10: {f1_scores[0]}\n",
                  f"F1@25: {f1_scores[1]}\n",
                  f"F1@50: {f1_scores[2]}\n",
                  "\n",
                  "Srednja prosječna preciznost uz minimalan prag preklapanja od 15, 25 i 50%\n",
                  f"mAP@10: {mAP_scores[0]}\n",
                  f"mAP@25: {mAP_scores[1]}\n",
                  f"mAP@50: {mAP_scores[2]}\n",
                  "\n"])

#%%Display the results
with open(full_metrics_path, "r") as f:
    for line in f:
        print(line.rstrip())
print()
print("The results are stored in the file: ", full_metrics_path)

#%%Standard tests
test = 0
if test:
    #Test the metric computation results
    feat_path = glob.glob(os.path.sep.join([base_feats_dir, args["data_split"], "*tfrecord"]))
    dataset = build_test_pipeline(feat_path, example2video(feature_dim=FEATURE_DIM, training=True),
                                  batch_size=BATCH_SIZE, padded_batch=True, 
                                  padded_shapes=([457, None], [457]))
    print(model.evaluate(dataset))
Example #5
def get_feature_files(feat_type, kadar, data_split):
    ulazni_dir = feat_type_dict[feat_type]
    filenames = glob.glob(os.path.join(ulazni_dir, kadar, data_split,
                                       "*.tfrecord"),
                          recursive=True)
    return filenames


#Location of the input data
feat_path = get_feature_files(FEAT_TYPE, KADAR, data_split=args["data_split"])

#Define the dataset; the number of time steps is padded up to the largest
#step count in the whole set (457) so that graph construction gets an
#optimized implementation
#Dataset output structure: video_id, num_images, num_labels, image_seq, labels
dataset = build_test_pipeline(feat_path,
                              example2video(feature_dim=FEATURE_DIM,
                                            training=False),
                              batch_size=BATCH_SIZE,
                              padded_batch=True,
                              padded_shapes=([], [], [], [457, None], [457]),
                              padding_values=("", 0, 0, 0., 0))

#Extract the video ids
y_id = tf.concat([data_point[0] for data_point in dataset.as_numpy_iterator()],
                 axis=0).numpy()
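
#Hypothetical follow-up (not part of the original): the ids arrive as bytes
#objects, so decode them when readable strings are needed.
video_ids = [vid.decode("utf-8") for vid in y_id]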

#Extract the sequence lengths without padding
y_lens = tf.concat(
    [data_point[1] for data_point in dataset.as_numpy_iterator()],
    axis=0).numpy()

#Label probabilities at each time step