Code example #1
import numpy as np
import keras

# Project-level helpers assumed to be in scope (they are not standard library):
#   data.load_iso_video_list / data.prepare_iso_rgb_data / data.prepare_iso_flow_data,
#   plus minibatches and threading_data, whose usage matches TensorLayer's
#   tl.iterate.minibatches and tl.prepro.threading_data.


def isoFusionTestImageGenerator(filepath_1, filepath_2, batch_size, seq_len,
                                num_classes):
    # filepath_1: RGB list file, filepath_2: Flow list file
    X_train_1, y_train_1 = data.load_iso_video_list(filepath_1)
    X_train_2, y_train_2 = data.load_iso_video_list(filepath_2)
    X_tridx_1 = np.asarray(np.arange(0, len(y_train_1)), dtype=np.int32)
    X_tridx_2 = np.asarray(np.arange(0, len(y_train_2)), dtype=np.int32)
    y_train_1 = np.asarray(y_train_1, dtype=np.int32)
    y_train_2 = np.asarray(y_train_2, dtype=np.int32)

    # The two streams share sample indices, so a single minibatch iterator
    # drives both the RGB and the Flow lists.
    while True:
        for X_indices, y_label_t_1 in minibatches(X_tridx_1,
                                                  y_train_1,
                                                  batch_size,
                                                  shuffle=True):

            # Read data for each batch
            image_path_1 = []
            image_fcnt_1 = []
            image_path_2 = []
            image_fcnt_2 = []
            image_olen = []
            image_start = []
            is_training = []

            for data_a in range(batch_size):
                X_index_a = X_indices[data_a]
                key_str = '%06d' % X_index_a
                image_path_1.append(X_train_1[key_str]['videopath'])
                image_fcnt_1.append(X_train_1[key_str]['framecnt'])
                image_path_2.append(X_train_2[key_str]['videopath'])
                image_fcnt_2.append(X_train_2[key_str]['framecnt'])
                image_olen.append(seq_len)
                image_start.append(1)
                is_training.append(False)  # Testing
            image_info_1 = zip(image_path_1, image_fcnt_1, image_olen,
                               image_start, is_training)
            image_info_2 = zip(image_path_2, image_fcnt_2, image_olen,
                               image_start, is_training)

            X_data_t_1 = threading_data([_ for _ in image_info_1],
                                        data.prepare_iso_rgb_data)
            X_data_t_2 = threading_data([_ for _ in image_info_2],
                                        data.prepare_iso_flow_data)
            y_hot_label_t_1 = keras.utils.to_categorical(
                y_label_t_1, num_classes=num_classes)
            # The two modalities are yielded as separate model inputs; any
            # fusion (e.g. channel concatenation) is left to the model itself.
            yield ([X_data_t_1, X_data_t_2], y_hot_label_t_1)
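
Rough usage sketch (not part of the project code): the generator yields ([RGB batch, Flow batch], one-hot labels), so it can feed Keras' generator-based evaluation of a two-input network. `fusion_model` below is a hypothetical compiled two-stream model; the list paths and hyperparameters mirror code example #5.

rgb_list = './dataset_splits/IsoGD/valid_rgb_list.txt'
flow_list = './dataset_splits/IsoGD/valid_flow_list.txt'
batch_size = 4

_, test_labels = data.load_iso_video_list(rgb_list)
test_steps = len(test_labels) // batch_size  # floor: incomplete batches are dropped

scores = fusion_model.evaluate_generator(  # fusion_model: hypothetical two-input model
    isoFusionTestImageGenerator(rgb_list, flow_list, batch_size,
                                seq_len=32, num_classes=249),
    steps=test_steps)
print(scores)  # [loss, accuracy] under the compile settings in code example #4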
Code example #2
def jesterTestImageGenerator(filepath, batch_size, seq_len, num_classes,
                             modality):
    X_test, y_test = data.load_iso_video_list(filepath)
    X_teidx = np.asarray(np.arange(0, len(y_test)), dtype=np.int32)
    y_test = np.asarray(y_test, dtype=np.int32)
    while True:
        for X_indices, y_label_t in minibatches(X_teidx,
                                                y_test,
                                                batch_size,
                                                shuffle=False):
            # Read data for each batch
            image_path = []
            image_fcnt = []
            image_olen = []
            is_training = []
            for data_a in range(batch_size):
                X_index_a = X_indices[data_a]
                key_str = '%06d' % X_index_a
                image_path.append(X_test[key_str]['videopath'])
                image_fcnt.append(X_test[key_str]['framecnt'])
                image_olen.append(seq_len)
                is_training.append(False)  # Testing
            image_info = zip(image_path, image_fcnt, image_olen, is_training)
            if modality == 0:    # RGB
                X_data_t = threading_data([_ for _ in image_info],
                                          data.prepare_jester_rgb_data)
            elif modality == 2:  # Flow
                X_data_t = threading_data([_ for _ in image_info],
                                          data.prepare_jester_flow_data)
            else:
                raise ValueError('Unsupported modality: %d' % modality)
            y_hot_label_t = keras.utils.to_categorical(y_label_t,
                                                       num_classes=num_classes)
            yield (X_data_t, y_hot_label_t)
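
All of these generators read the same structure from data.load_iso_video_list: a dict of samples keyed by '%06d' % index with 'videopath' and 'framecnt' fields, plus a label list. Below is a hypothetical in-memory stand-in that mirrors only those fields (the real parser lives in each project's data.py, and the prepare_* loaders still need actual frame directories):

def fake_iso_video_list(num_samples=8):
    # Hypothetical stand-in: provides only the fields the generators index into.
    X = {}
    y = []
    for i in range(num_samples):
        X['%06d' % i] = {'videopath': '/tmp/video_%06d' % i, 'framecnt': 40}
        y.append(i % 5)  # dummy class labels
    return X, y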
Code example #3
File: datagen.py  Project: NIDONGDEA/ContinuousGR
def isoTrainImageGenerator(filepath, batch_size, depth, num_classes, modality):
  X_train,y_train = data.load_iso_video_list(filepath)
  X_tridx = np.asarray(np.arange(0, len(y_train)), dtype=np.int32)
  y_train = np.asarray(y_train, dtype=np.int32)
  while True:
    for X_indices, y_label_t in minibatches(X_tridx, y_train, 
                                            batch_size, shuffle=True):
      # Read data for each batch      
      image_path = []
      image_fcnt = []
      image_olen = []
      image_start = []
      is_training = []
      for data_a in range(batch_size):
        X_index_a = X_indices[data_a]
        key_str = '%06d' % X_index_a
        image_path.append(X_train[key_str]['videopath'])
        image_fcnt.append(X_train[key_str]['framecnt'])
        image_olen.append(depth)
        image_start.append(1)
        is_training.append(True) # Training
      image_info = zip(image_path, image_fcnt, image_olen, image_start,
                       is_training)
      if modality == 0:    # RGB
        X_data_t = threading_data([_ for _ in image_info],
                                  data.prepare_iso_rgb_data)
      elif modality == 1:  # Depth
        X_data_t = threading_data([_ for _ in image_info],
                                  data.prepare_iso_depth_data)
      elif modality == 2:  # Flow
        X_data_t = threading_data([_ for _ in image_info],
                                  data.prepare_iso_flow_data)
      else:
        raise ValueError('Unsupported modality: %d' % modality)
      y_hot_label_t = keras.utils.to_categorical(y_label_t, num_classes=num_classes)
      yield (X_data_t, y_hot_label_t)
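
Rough training-loop sketch (assumed wiring, not shown in this excerpt): the generator plugs into Keras' fit_generator; `model` is a hypothetical compiled network with a matching input shape, and the list path and batch size mirror code example #5.

train_list = './dataset_splits/IsoGD/train_rgb_list.txt'
batch_size = 4

_, train_labels = data.load_iso_video_list(train_list)
train_steps = len(train_labels) // batch_size  # floor: the last partial batch is dropped

model.fit_generator(  # model: hypothetical compiled network
    isoTrainImageGenerator(train_list, batch_size, depth=32,
                           num_classes=249, modality=0),  # modality 0 = RGB
    steps_per_epoch=train_steps,
    epochs=10)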
Code example #4
# The excerpt begins inside the final classification layer; the call is shown
# reconstructed here (units assumed to be num_classes; `flatten` is defined
# above this excerpt):
classes = keras.layers.Dense(num_classes,
                             kernel_regularizer=l2(weight_decay),
                             name='Classes')(flatten)
outputs = keras.layers.Activation('softmax', name='Output')(classes)
model = keras.models.Model(inputs=inputs, outputs=outputs)
optimizer = keras.optimizers.SGD(lr=0.001,
                                 decay=0,
                                 momentum=0.9,
                                 nesterov=False)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

if cfg_type == ATTENTIONX:
    pretrained_model = '%s/isogr_rgb_attenxclstm_weights.h5' % model_prefix
elif cfg_type == ATTENTIONI:
    pretrained_model = '%s/isogr_rgb_atteniclstm_weights.h5' % model_prefix
elif cfg_type == ATTENTIONO:
    pretrained_model = '%s/isogr_rgb_attenoclstm_weights.h5' % model_prefix
print('Loading pretrained model from %s' % pretrained_model)
model.load_weights(pretrained_model, by_name=False)
for i in range(len(model.trainable_weights)):
    print(model.trainable_weights[i])

_, test_labels = data.load_iso_video_list(testing_datalist)
test_steps = len(test_labels) // batch_size
print(model.evaluate_generator(
    isoTestImageGenerator(testing_datalist, batch_size, seq_len, num_classes,
                          cfg_modality),
    steps=test_steps,
))
Code example #5
# The excerpt begins inside a configuration branch; the IsoGD fusion settings
# are kept here at top level (nb_epoch is defined outside this excerpt).
init_epoch = 0
seq_len = 32
batch_size = 4
num_classes = 249
dataset_name = 'isogr_Fusion'
RGB_training_datalist = './dataset_splits/IsoGD/train_rgb_list.txt'
RGB_testing_datalist = './dataset_splits/IsoGD/valid_rgb_list.txt'
Flow_training_datalist = './dataset_splits/IsoGD/train_flow_list.txt'
Flow_testing_datalist = './dataset_splits/IsoGD/valid_flow_list.txt'

weight_decay = 0.00005
model_prefix = './models/Fusion/'
weights_file = '%s/%s_weights.{epoch:02d}-{val_loss:.2f}.h5' % (model_prefix,
                                                                dataset_name)

_, train_labels = data.load_iso_video_list(RGB_training_datalist)
train_steps = len(train_labels) // batch_size
_, test_labels = data.load_iso_video_list(RGB_testing_datalist)
test_steps = len(test_labels) // batch_size
print('nb_epoch: %d - seq_len: %d - batch_size: %d - weight_decay: %.6f' % (
    nb_epoch, seq_len, batch_size, weight_decay))


def lr_polynomial_decay(global_step):
    # Polynomial decay from 1e-3 to 1e-6 over the whole training run.
    learning_rate = 0.001
    end_learning_rate = 0.000001
    decay_steps = train_steps * nb_epoch
    power = 0.9
    p = float(global_step) / float(decay_steps)
    lr = (learning_rate - end_learning_rate) * np.power(1. - p, power) \
        + end_learning_rate
    return lr
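
A minimal sketch (assumed wiring, not part of this excerpt) of applying the decay per batch with a Keras callback that counts the global step and writes the new rate into the optimizer:

import keras.backend as K

class PolynomialDecay(keras.callbacks.Callback):
    # Hypothetical callback: counts batches across epochs and updates the
    # SGD learning rate in place before each batch.
    def __init__(self):
        super(PolynomialDecay, self).__init__()
        self.global_step = 0

    def on_batch_begin(self, batch, logs=None):
        K.set_value(self.model.optimizer.lr,
                    lr_polynomial_decay(self.global_step))
        self.global_step += 1

# e.g. model.fit_generator(..., callbacks=[PolynomialDecay()])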
Code example #6
File: datagen.py  Project: NNUCJ/ConvLSTMForGR
def jesterTrainImageGenerator(filepath, batch_size, seq_len, num_classes,
                              modality):
    X_train, y_train = data.load_iso_video_list(filepath)
    X_tridx = np.asarray(np.arange(0, len(y_train)), dtype=np.int32)
    y_train = np.asarray(y_train, dtype=np.int32)
    while True:
        for X_indices, y_label_t in minibatches(X_tridx,
                                                y_train,
                                                batch_size,
                                                shuffle=True):
            # Read data for each batch
            image_path = []
            image_fcnt = []
            image_olen = []
            image_start = []
            is_training = []
            for data_a in range(batch_size):
                X_index_a = X_indices[data_a]
                key_str = '%06d' % X_index_a
                if USING_PYRAMID_INPUT:  # module-level flag defined elsewhere in datagen.py
                    framecnt = X_train[key_str]['framecnt']
                    if framecnt <= seq_len:
                        # Short clip: queue the full clip three times.
                        for pid in range(3):
                            image_path.append(X_train[key_str]['videopath'])
                            image_fcnt.append(X_train[key_str]['framecnt'])
                            image_olen.append(seq_len)
                            image_start.append(1)
                            is_training.append(True)  # Training
                    elif framecnt < seq_len * 3:
                        # Medium clip: head, center, and tail windows of seq_len frames.
                        image_path.append(X_train[key_str]['videopath'])
                        image_fcnt.append(seq_len)
                        image_olen.append(seq_len)
                        image_start.append(1)
                        is_training.append(True)  # Training
                        image_path.append(X_train[key_str]['videopath'])
                        image_fcnt.append(seq_len)
                        image_olen.append(seq_len)
                        image_start.append(framecnt // 2 - seq_len // 2 + 1)
                        is_training.append(True)  # Training
                        image_path.append(X_train[key_str]['videopath'])
                        image_fcnt.append(seq_len)
                        image_olen.append(seq_len)
                        image_start.append(framecnt - seq_len + 1)
                        is_training.append(True)  # Training
                    else:
                        # Long clip: three evenly spaced windows across the video.
                        for pid in range(3):
                            image_path.append(X_train[key_str]['videopath'])
                            image_fcnt.append(framecnt // 3)
                            image_olen.append(seq_len)
                            image_start.append(framecnt * pid // 3 + 1)
                            is_training.append(True)  # Training
                # The full-length clip is always appended as well, regardless of
                # the pyramid branch above.
                image_path.append(X_train[key_str]['videopath'])
                image_fcnt.append(X_train[key_str]['framecnt'])
                image_olen.append(seq_len)
                image_start.append(1)
                is_training.append(True)  # Training
            image_info = zip(image_path, image_fcnt, image_olen, image_start,
                             is_training)
            if modality == 0:    # RGB
                X_data_t = threading_data([_ for _ in image_info],
                                          data.prepare_jester_rgb_data)
            elif modality == 2:  # Flow
                X_data_t = threading_data([_ for _ in image_info],
                                          data.prepare_jester_flow_data)
            else:
                raise ValueError('Unsupported modality: %d' % modality)
            y_hot_label_t = keras.utils.to_categorical(y_label_t,
                                                       num_classes=num_classes)
            yield (X_data_t, y_hot_label_t)
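
For intuition, a small worked example (illustrative numbers only) of the "medium clip" branch above:

framecnt, seq_len = 50, 32
starts = [1,                                 # head window
          framecnt // 2 - seq_len // 2 + 1,  # centered window -> 10
          framecnt - seq_len + 1]            # tail window     -> 19
print(starts)  # [1, 10, 19]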