                                        **params, type_gen='train')
validation_generator = DataGeneratorBKB(test_keys, labels, **params, type_gen='test')

## Design model
if model_type == 'Conv3D':
    model = create_model_Conv3D(dim, n_sequence, n_channels, n_output, set_pretrain=True)
else:
    model = create_model_pretrain(dim, n_sequence, n_channels, n_output, 1.0)

load_model = True
start_epoch = 0
if load_model:
    # weights_path = 'pretrain/mobileNetV2-BKB-3ds-48-0.55.hdf5'
    # weights_path = 'BUPT-Conv3D-dataset02-transfer-0-0-0.hdf5'  # 'KARD-aug-RGBdif-01-0.13-0.17.hdf5'
    weights_path = 'KARD-Conv3D-RGBdiff-crop-224-650-0.75-0.75.hdf5'  # 'BUPT-Conv3D-KARD-transfer-0-0-0.hdf5'
    start_epoch = 650
    model.load_weights(weights_path)

## Set callback
validate_freq = 10
# filepath = detail_weight + "-{epoch:02d}-{accuracy:.2f}-{val_accuracy:.2f}.hdf5"
# checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=False, period=validate_freq)
filepath = detail_weight + "-{epoch:02d}-{acc:.2f}-{val_acc:.2f}.hdf5"
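# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): one plausible way to wire up
# the checkpoint callback and the training call from the setup above, assuming
# the Keras 2.x-era API (ModelCheckpoint with `period=`, Model.fit_generator).
# `training_generator` stands in for the generator assigned in the truncated
# first statement; the `epochs` budget and `workers` count are illustrative
# assumptions, not values from the source.
# ---------------------------------------------------------------------------
from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1,
                             save_best_only=False, period=validate_freq)

model.fit_generator(generator=training_generator,
                    validation_data=validation_generator,
                    epochs=1000,                 # assumed total epoch budget
                    initial_epoch=start_epoch,   # resume from the loaded checkpoint
                    callbacks=[checkpoint],
                    use_multiprocessing=True,
                    workers=4)                   # assumed worker count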
index_sampling = get_sampling_frame(length_file)  # get frame indices to sample
for j, n_pic in enumerate(index_sampling):
    print(j, n_pic)
    cap.set(cv2.CAP_PROP_POS_FRAMES, n_pic)
    ret, frame = cap.read()
    new_image = cv2.resize(frame, dim)
    new_image = new_image / 255.0
    X[0, j, :, :, :] = new_image
    # cv2.imshow('Frame', frame)
    # cv2.waitKey(500)

cap.release()
print(X.shape)

## Predict
weights_path = 'BUPT-augment-RGBdiff-120-0.90-0.91.hdf5'
# weights_path = 'KARD-aug-RGBdif-40-0.92-0.98.hdf5'
model = create_model_pretrain(dim, n_sequence, n_channels, n_output, 'MobileNetV2')
model.load_weights(weights_path)

X[0, ] = calculateRGBdiff(X[0, ])

for i in range(n_sequence):
    cv2.imshow('Frame', X[0, i])
    cv2.waitKey(500)

result = model.predict(X)
print(result)
# class_label = ['run','sit','stand','standup','walk']
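# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): mapping the softmax output in
# `result` to a readable label, reusing the class list from the comment above.
# The class order is an assumption carried over from that comment.
# ---------------------------------------------------------------------------
import numpy as np

class_label = ['run', 'sit', 'stand', 'standup', 'walk']
predicted_index = int(np.argmax(result[0]))
print('Predicted action: {} ({:.2f} confidence)'.format(
    class_label[predicted_index], float(result[0][predicted_index])))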
                     kernel_size=(3, 3, 3), activation='relu', kernel_initializer='he_uniform'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(.4))
    model.add(Dense(24, activation='relu'))
    model.add(Dropout(.4))
    model.add(Dense(n_output, activation='softmax'))
    model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model

dim_1 = (120, 120)
dim_2 = (120, 120)
n_sequence_1 = 10
n_sequence_2 = 8
n_channels = 3
n_output = 5

model_1 = create_model_Conv3D(dim_1, n_sequence_1, n_channels, n_output)
model_2 = create_model_pretrain(dim_2, n_sequence_2, n_channels, n_output, 1.0)
model_3 = create_model_pretrain(dim_2, n_sequence_2, n_channels, n_output, 0.35)

print(model_1.summary())
print(model_2.summary())
print(model_3.summary())
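# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): one plausible shape for
# create_model_pretrain, assuming the final argument is the MobileNetV2 width
# multiplier `alpha` (matching the 1.0 / 0.35 calls above; the predict script
# passes a backbone name instead, so the real signature likely differs). The
# TimeDistributed-CNN-plus-LSTM layout, layer sizes, and `weights='imagenet'`
# are illustrative assumptions, not the author's implementation.
# ---------------------------------------------------------------------------
from keras.applications import MobileNetV2
from keras.layers import Input, Dense, Dropout, LSTM, TimeDistributed, GlobalAveragePooling2D
from keras.models import Model

def create_model_pretrain(dim, n_sequence, n_channels, n_output, alpha):
    inputs = Input(shape=(n_sequence, *dim, n_channels))
    backbone = MobileNetV2(input_shape=(*dim, n_channels), alpha=alpha,
                           weights='imagenet', include_top=False)
    x = TimeDistributed(backbone)(inputs)             # per-frame CNN features
    x = TimeDistributed(GlobalAveragePooling2D())(x)  # (n_sequence, features)
    x = LSTM(64, return_sequences=False)(x)           # temporal aggregation (assumed size)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    outputs = Dense(n_output, activation='softmax')(x)
    model = Model(inputs, outputs)
    model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model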