# Tail of a VGG-style frame-level CNN appended to an existing Sequential
# `model` (the earlier layers and the `model = Sequential()` line are
# defined outside this chunk): three conv blocks of 256/512/512 filters,
# each followed by 2x2 max-pooling, then flattened to a feature vector.
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
model.add(Flatten())
# Run the CNN on every frame of a clip via TimeDistributed (`td` —
# presumably an alias for layers.TimeDistributed; defined outside this
# chunk), then summarize the per-frame features with an LSTM.
# Input is (batch, time, H, W, 1) — single-channel frames.
# NOTE(review): `input` shadows the Python builtin of the same name.
input = layers.Input(batch_shape=(batch_size, time_step, INPUT_HEIGHT, INPUT_WIDTH, 1))
tdOut = td(model)(input)
lstmOut = layers.LSTM(50, activation='tanh')(tdOut)
# 5-way output with ReLU activation — looks like a regression head
# (non-negative outputs), not class probabilities; confirm intent.
preds = layers.Dense(5, activation='relu')(lstmOut)
tdmodel = tf.keras.models.Model(inputs=input, outputs=preds)
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
# NOTE(review): MSE loss paired with an 'accuracy' metric — accuracy is
# not meaningful for regression; verify this metric choice.
tdmodel.compile(optimizer=opt, loss='MSE', metrics=['accuracy'])
# DataSequencer is a project-local generator (defined elsewhere); it is
# given paths, labels, batch/clip sizing, and the frame shape.
training_generator = DataSequencer(trainPaths, trainLabels, batch_size, time_step, (INPUT_HEIGHT, INPUT_WIDTH, 1))
testing_generator = DataSequencer(testPaths, testLabels, batch_size, time_step, (INPUT_HEIGHT, INPUT_WIDTH, 1))
# NOTE(review): this fit() call is truncated in the visible source — its
# remaining keyword arguments continue outside this chunk.
history = tdmodel.fit(x=training_generator, epochs=epochs, steps_per_epoch=len(training_generator),
# Transfer-learning variant: reuse ImageNet-pretrained VGG16 (without its
# classifier head) as the per-frame feature extractor.
base_model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(64, 64, 3), include_top=False)
# Take the output of layer index 18 — presumably the last conv/pool layer
# before the head; verify the index against base_model.summary().
cnnOut = base_model.layers[18].output
cnnModel = tf.keras.Model(base_model.input, cnnOut)
# Freeze everything, then re-enable training on the last 8 layers only
# (fine-tune the top of the network).
for layer in cnnModel.layers:
    layer.trainable = False
# or if we want to set the first 20 layers of the network to be non-trainable
for layer in cnnModel.layers[-8:]:
    layer.trainable = True
# Build the temporal model: TimeDistributed CNN -> per-frame Flatten ->
# LSTM -> softmax over syn_head_gen.num_bins classes (project-defined).
# NOTE(review): `input` shadows the Python builtin of the same name.
input = layers.Input(batch_shape=(args.batch_size, args.time_step, 64, 64, 3))
tdOut = td(cnnModel)(input)
flOut = td(layers.Flatten())(tdOut)
lstmOut = layers.LSTM(50, activation='tanh')(flOut)
preds = layers.Dense(syn_head_gen.num_bins, activation='softmax')(lstmOut)
model = tf.keras.models.Model(inputs=input, outputs=preds)
# Debug: list layer indices/names of the combined model.
for i, layer in enumerate(model.layers):
    print(i, layer.name)
# NOTE(review): this second freeze/unfreeze pass operates on the OUTER
# model's layers (the whole cnnModel counts as one layer here), so it
# overrides the per-layer trainable flags set on cnnModel above —
# confirm which of the two passes reflects the intended fine-tuning.
for layer in model.layers:
    layer.trainable = False
# or if we want to set the first 20 layers of the network to be non-trainable
for layer in model.layers[-8:]:
    layer.trainable = True
    print(layer.name)
# NOTE(review): this chunk begins mid-statement — `s_train_roll)` is the
# tail of an np.vstack/concatenation whose head is outside the visible
# source (stacking the b_/k_/s_ training label arrays).
s_train_roll)
# Concatenate background flags and stack per-video start indices across
# the three training subsets (b_, k_, s_ — produced outside this chunk).
full_train_bg = b_train_bg + k_train_bg + s_train_bg
full_train_video_start = np.vstack(
    (b_train_video_start, k_train_video_start, s_train_video_start))
# Build the test split from the BIWI head-pose data via the project-local
# syn_head_gen helper (defined elsewhere).
test_images, test_pitch, test_yaw, test_roll, test_bg, test_video_start = \
    syn_head_gen.create_data(args.biwi_test_dir, args.biwi_model_test_list,
                             args.biwi_move_test_list)
# Load the first training frame in color (flag 1 = BGR); presumably a
# sanity check of the image paths — the result is not used below.
image = cv2.imread(full_train_images[0, 0], 1)
# AlexNet-style TimeDistributed CNN over (time_step, 64, 64, 3) clips:
# conv(64, stride 4) -> pool -> conv(192) -> pool -> conv(384) ...
model = tf.keras.Sequential()
model.add(
    td(layers.Conv2D(filters=64, kernel_size=(3, 3), strides=4,
                     padding='same', activation=tf.nn.relu),
       input_shape=(args.time_step, 64, 64, 3)))
model.add(td(layers.MaxPool2D(pool_size=(3, 3), strides=2)))
model.add(
    td(
        layers.Conv2D(filters=192,
                      kernel_size=(3, 3),
                      padding='same',
                      activation=tf.nn.relu)))
model.add(td(layers.MaxPool2D(pool_size=(3, 3), strides=2)))
# NOTE(review): this final Conv2D call is truncated in the visible
# source — its remaining arguments continue outside this chunk.
model.add(
    td(
        layers.Conv2D(filters=384,
                      kernel_size=(3, 3),
                      padding='same',