Example #1
def build_model_load_weights(image_dim, model_dir, model_name):
    opt = SGD(lr=0.001)
    model = build_FCNN(
        batch_size=1,
        patch_size=image_dim,
        optimizer=opt,
        output_ROI_mask=False,
    )
    # newest checkpoints first (filenames start with the epoch number)
    weight_files = sorted(os.listdir(model_dir), reverse=True)

    # model_name is a regular expression matched against checkpoint filenames,
    # e.g. '07-1067.8649.h5', '29-5.8353.h5', or '04-1067.5609-better.h5'
    pattern = re.compile(model_name)

    best_model = None
    for weight_file in weight_files:
        if pattern.search(weight_file):
            best_model = weight_file  # first hit in the reversed list = newest match
            break
    if best_model is None:
        raise IOError('no weight file matching {!r} in {}'.format(model_name, model_dir))
    print(">>>> best_model: {}".format(best_model))
    # best_model = weight_files[-1]
    model.load_weights(filepath=os.path.join(model_dir, best_model),
                       by_name=True)

    # model.load_weights('load_weights/3single_views/3single_views_weights_encode_decoder.h5')
    # model.save_weights('load_weights/3single_views/3single_views_weights_encode_decoder_learned.h5')

    return model
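
For context, here is a minimal, self-contained sketch of the checkpoint-selection logic above, run against a hypothetical list of filenames (the pattern and file names are illustrative, not from the original project):

import re

# Hypothetical checkpoint names in the '{epoch:02d}-{val_loss:.4f}' format
# written by the ModelCheckpoint callbacks in Examples #2 and #3.
weight_files = ['04-1067.5609-better.h5', '07-1067.8649.h5', '29-5.8353.h5']
weight_files.sort(reverse=True)  # newest epoch first

pattern = re.compile(r'29-5\.8353')  # illustrative model_name regex
best_model = next((f for f in weight_files if pattern.search(f)), None)
print(best_model)  # -> 29-5.8353.h5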
Example #2
def main(exp_name='FCNN', verbosity=0):
    ################################################################################
    # Experiment Settings
    ################################################################################

    # Model + Generator hyper-parameters
    optimizer = 'sgd'
    learning_rate = 0.0001
    # learning_rate = 0.002  # for Nadam only
    lr_decay = 0.00001
    momentum = 0.9
    nesterov = False
    weight_decay = 0.0001 # 0.001
    #save_dir = os.path.join("/opt/visal/di/models/models_keras2/model_cell", exp_name)
    save_dir = os.path.join("", exp_name)

    batch_size = 1
    epochs = 2000
    images_per_set = None
    patches_per_image = 1
    patch_dim = (360, 640, 1)
    image_shuffle = True
    patch_shuffle = True
    epoch_random_state = None  # set to an integer to fix the data order across epochs

    train_samples = 700
    val_samples   = 289

    ################################################################################
    # Model Definition
    ################################################################################
    if optimizer.lower() == 'sgd':
        opt = SGD(
            lr=learning_rate,
            decay=lr_decay,
            momentum=momentum,
            nesterov=nesterov,
            # lr_policy='inv' if params_solver.get('lr_policy', None) is None else params_solver['lr_policy'],
            # step=params_solver.get('step', 10000000.),  # useful only when lr_policy == 'step'
            clipnorm=5,
            clipvalue=1)
    elif optimizer.lower() == 'adam':
        logging.info("use Adam solver")
        opt = Adam(
            lr=learning_rate,
            # decay=params_solver.get('lr_decay', 0.),
            clipnorm=5,
            clipvalue=1)
    elif optimizer.lower() == 'nadam':
        logging.info("use Nadam solver")
        opt = Nadam(
            lr=learning_rate,
            clipnorm=5,
            clipvalue=1)
    else:
        raise ValueError('unrecognized solver: {!r}'.format(optimizer))

    model = build_FCNN(
        batch_size=batch_size,
        patch_size=patch_dim,
        optimizer=opt,
        base_weight_decay=weight_decay,
        output_ROI_mask=False,
    )


    # Generator setup
    scaler_stability_factor = 1000  # passed to the generators as density_scaler (was 100)


    train_path0 = '/opt/visal/home/Multi_view/Datasets/Duke_MTMTC/F:/Preprocessing/Density_map/'
    # train_path0 = '/home/zq/codes/SSHFS/Futudrama2/Multi_view/Datasets/Duke_MTMC/'

    train_view1_1 = train_path0 + 'dmaps/train/DukeMTMC_view1_dmap_10.h5'
    train_view2_1 = train_path0 + 'dmaps/train/DukeMTMC_view2_dmap_10.h5'
    train_view3_1 = train_path0 + 'dmaps/train/DukeMTMC_view3_dmap_10.h5'
    train_view4_1 = train_path0 + 'dmaps/train/DukeMTMC_view4_dmap_10_proj.h5'
    train_GP_1 = train_path0 + 'GP_dmaps/train/DukeMTMC_groundplane_train_dmaps_10.h5'

    h5file_train_view1 = [train_view1_1]
    h5file_train_view2 = [train_view2_1]
    h5file_train_view3 = [train_view3_1]
    h5file_train_view4 = [train_view4_1]
    h5file_train_GP = [train_GP_1]

    train_gen = datagen_v3(
        h5file_view1=h5file_train_view1,
        h5file_view2=h5file_train_view2,
        h5file_view3=h5file_train_view3,
        h5file_view4=h5file_train_view4,
        h5file_GP=h5file_train_GP,

        batch_size=batch_size,
        images_per_set=images_per_set,
        patches_per_image=patches_per_image,
        patch_dim=patch_dim[:2],
        density_scaler=scaler_stability_factor,
        image_shuffle=image_shuffle,
        patch_shuffle=patch_shuffle,
        random_state=epoch_random_state
    )

    test_view1_1 = train_path0 + 'dmaps/test/DukeMTMC_view1_dmap_10.h5'
    test_view2_1 = train_path0 + 'dmaps/test/DukeMTMC_view2_dmap_10.h5'
    test_view3_1 = train_path0 + 'dmaps/test/DukeMTMC_view3_dmap_10.h5'
    test_view4_1 = train_path0 + 'dmaps/test/DukeMTMC_view4_dmap_10_proj.h5'
    test_GP_1 = train_path0 + 'GP_dmaps/test/DukeMTMC_groundplane_test_dmaps_10.h5'

    h5file_test_GP = [test_GP_1]

    h5file_test_view1 = [test_view1_1]
    h5file_test_view2 = [test_view2_1]
    h5file_test_view3 = [test_view3_1]
    h5file_test_view4 = [test_view4_1]

    val_gen = datagen_v3(
        h5file_view1=h5file_test_view1,
        h5file_view2=h5file_test_view2,
        h5file_view3=h5file_test_view3,
        h5file_view4=h5file_test_view4,
        h5file_GP=h5file_test_GP,

        batch_size=batch_size,
        images_per_set=images_per_set,
        patches_per_image=1,  # 1000,
        patch_dim=patch_dim[:2],
        density_scaler=scaler_stability_factor,
        image_shuffle=image_shuffle,
        patch_shuffle=patch_shuffle,
        random_state=epoch_random_state
    )



    # Model Training
    # Save directory
    if not os.path.exists(save_dir):
        logging.info(">>>> save dir: {}".format(save_dir))
        os.makedirs(save_dir)
    callbacks = list()
    callbacks.append(CSVLogger(
        filename=os.path.join(save_dir, 'train_val.csv'),
        separator=',',
        append=False,  # set to True when resuming from a checkpoint
    ))
    callbacks.append(ModelCheckpoint(
        filepath=os.path.join(save_dir, '{epoch:02d}-{val_loss:.4f}.h5'),
        monitor='val_loss',
        verbose=1,
        save_weights_only=True,
        save_best_only=False,
    ))
    callbacks.append(ModelCheckpoint(
        filepath=os.path.join(save_dir, '{epoch:02d}-{val_loss:.4f}-better.h5'),
        monitor='val_loss',
        verbose=1,
        save_weights_only=True,
        save_best_only=True,  # keeps a file for every val_loss improvement
    ))
    #callbacks.append(EarlyStopping(monitor='val_loss', patience=50, verbose=1, mode='auto'))
    # callbacks.append(TensorBoard(
    #     log_dir=os.path.join(save_dir, 'TensorBoard_info'),
    #     histogram_freq=1,
    #     write_graph=True,
    #     write_images=True,
    #     embeddings_freq=0,  # default, new feature only in latest keras and tensorflow
    #     embeddings_layer_names=None,  # default
    #     embeddings_metadata=None,  # default
    # ))
    if verbosity == 0:
        callbacks.append(batch_loss_callback(
            filename=os.path.join(save_dir, 'train_val_loss_batch.log'),
            append=False,  # set to True when resuming from a checkpoint
        ))

    logging.info('Begin training...')
    start_time = time.time()
    # train the network from here
    model.fit_generator(
        generator=train_gen,
        steps_per_epoch=train_samples // batch_size,
        epochs=epochs,
        verbose=verbosity,
        callbacks=callbacks,
        validation_data=val_gen,
        validation_steps=val_samples // batch_size,
        max_q_size=20,
        workers=1,
        pickle_safe=False)

    # YY = model.layers[60].output
    # print(sum(YY.flatten))

    logging.info('----- {:.2f} seconds -----'.format(time.time() - start_time))
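
A hypothetical entry point for running this experiment (the argument values are assumptions, not part of the original source):

if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)  # surface the logging.info(...) calls in main()
    main(exp_name='FCNN', verbosity=0)  # verbosity=0 also attaches batch_loss_callback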
Example #3
def main(exp_name='FCNN', verbosity=0):
    ################################################################################
    # Experiment Settings
    ################################################################################

    # Model + Generator hyper-parameters
    optimizer = 'sgd'
    learning_rate = 0.0001  # 0.0001
    # learning_rate = 0.002  # for Nadam only
    lr_decay = 0.0001
    momentum = 0.9
    nesterov = False
    weight_decay = 0.0001  # 0.001
    save_dir = os.path.join("", exp_name)

    batch_size = 1
    epochs = 2000
    images_per_set = None
    patches_per_image = 1  #1000
    patch_dim = (288, 384, 1)
    image_shuffle = True
    patch_shuffle = True
    epoch_random_state = None  # set to an integer to fix the data order across epochs

    train_samples = 1105  #243243 #20490
    val_samples = 794  #59598 #8398

    ################################################################################
    # Model Definition
    ################################################################################
    if optimizer.lower() == 'sgd':
        opt = SGD(
            lr=learning_rate,
            decay=lr_decay,
            momentum=momentum,
            nesterov=nesterov,
            # lr_policy='inv' if params_solver.get('lr_policy', None) is None else params_solver['lr_policy'],
            # step=params_solver.get('step', 10000000.),  # useful only when lr_policy == 'step'
            clipnorm=5,
            clipvalue=1)
    elif optimizer.lower() == 'adam':
        logging.info("use Adam solver")
        opt = Adam(
            lr=learning_rate,
            # decay=params_solver.get('lr_decay', 0.),
            clipnorm=5,
            clipvalue=1)
    elif optimizer.lower() == 'nadam':
        logging.info("use Nadam solver")
        opt = Nadam(lr=learning_rate, clipnorm=5, clipvalue=1)
    else:
        raise ValueError('unrecognized solver: {!r}'.format(optimizer))

    model = build_FCNN(
        batch_size=batch_size,
        patch_size=patch_dim,
        optimizer=opt,
        base_weight_decay=weight_decay,
        output_ROI_mask=False,
    )
    # note: initializing from single-view counting feature-extraction weights performs better

    # Generator setup
    scaler_stability_factor = 1000  # passed to the generators as density_scaler (was 100)

    train_path0 = '/opt/visal/home/Multi_view/Datasets/PETS_2009/dmaps/'
    train_view1_1 = train_path0 + 'S1L3/14_17/train_test/PETS_S1L3_1_view1_train_test_10.h5'
    train_view1_2 = train_path0 + 'S1L3/14_33/train_test/PETS_S1L3_2_view1_train_test_10.h5'
    train_view1_3 = train_path0 + 'S2L2/14_55/train_test/10_10/PETS_S2L2_1_view1_train_test_10.h5'
    train_view1_4 = train_path0 + 'S2L3/14_41/train_test/10_10/PETS_S2L3_1_view1_train_test_10.h5'

    train_view2_1 = train_path0 + 'S1L3/14_17/train_test/PETS_S1L3_1_view2_train_test_10.h5'
    train_view2_2 = train_path0 + 'S1L3/14_33/train_test/PETS_S1L3_2_view2_train_test_10.h5'
    train_view2_3 = train_path0 + 'S2L2/14_55/train_test/10_10/PETS_S2L2_1_view2_train_test_10.h5'
    train_view2_4 = train_path0 + 'S2L3/14_41/train_test/10_10/PETS_S2L3_1_view2_train_test_10.h5'

    train_view3_1 = train_path0 + 'S1L3/14_17/train_test/PETS_S1L3_1_view3_train_test_10.h5'
    train_view3_2 = train_path0 + 'S1L3/14_33/train_test/PETS_S1L3_2_view3_train_test_10.h5'
    train_view3_3 = train_path0 + 'S2L2/14_55/train_test/10_10/PETS_S2L2_1_view3_train_test_10.h5'
    train_view3_4 = train_path0 + 'S2L3/14_41/train_test/10_10/PETS_S2L3_1_view3_train_test_10.h5'

    train_GP_1 = train_path0 + 'S1L3/14_17/GP_maps/PETS_S1L3_1_groundplane_dmaps_10.h5'
    train_GP_2 = train_path0 + 'S1L3/14_33/GP_maps/PETS_S1L3_2_groundplane_dmaps_10.h5'
    train_GP_3 = train_path0 + 'S2L2/14_55/GP_maps/PETS_S2L2_1_groundplane_dmaps_10.h5'
    train_GP_4 = train_path0 + 'S2L3/14_41/GP_maps/PETS_S2L3_1_groundplane_dmaps_10.h5'

    h5file_train_view1 = [
        train_view1_1, train_view1_2, train_view1_3, train_view1_4
    ]
    h5file_train_view2 = [
        train_view2_1, train_view2_2, train_view2_3, train_view2_4
    ]
    h5file_train_view3 = [
        train_view3_1, train_view3_2, train_view3_3, train_view3_4
    ]
    h5file_train_GP = [train_GP_1, train_GP_2, train_GP_3, train_GP_4]

    train_gen = datagen_v3(h5file_view1=h5file_train_view1,
                           h5file_view2=h5file_train_view2,
                           h5file_view3=h5file_train_view3,
                           h5file_GP=h5file_train_GP,
                           batch_size=batch_size,
                           images_per_set=images_per_set,
                           patches_per_image=patches_per_image,
                           patch_dim=patch_dim[:2],
                           density_scaler=scaler_stability_factor,
                           image_shuffle=image_shuffle,
                           patch_shuffle=patch_shuffle,
                           random_state=epoch_random_state)

    test_path0 = '/opt/visal/home/qzhang364/Multi_view/Datasets/PETS_2009/dmaps/'
    #test_path0 = '/media/zq/16A4D077A4D05B37/0_Ubuntu/datasets/PETS_2009/dmaps/'
    test_view1_1 = test_path0 + 'S1L1/13_57/train_test/PETS_S1L1_1_view1_train_test_10.h5'
    test_view1_2 = test_path0 + 'S1L1/13_59/train_test/PETS_S1L1_2_view1_train_test_10.h5'
    test_view1_3 = test_path0 + 'S1L2/14_06/train_test/PETS_S1L2_1_view1_train_test_10.h5'
    test_view1_4 = test_path0 + 'S1L2/14_31/train_test/PETS_S1L2_2_view1_train_test_10.h5'

    test_view2_1 = test_path0 + 'S1L1/13_57/train_test/PETS_S1L1_1_view2_train_test_10.h5'
    test_view2_2 = test_path0 + 'S1L1/13_59/train_test/PETS_S1L1_2_view2_train_test_10.h5'
    test_view2_3 = test_path0 + 'S1L2/14_06/train_test/PETS_S1L2_1_view2_train_test_10.h5'
    test_view2_4 = test_path0 + 'S1L2/14_31/train_test/PETS_S1L2_2_view2_train_test_10.h5'

    test_view3_1 = test_path0 + 'S1L1/13_57/train_test/PETS_S1L1_1_view3_train_test_10.h5'
    test_view3_2 = test_path0 + 'S1L1/13_59/train_test/PETS_S1L1_2_view3_train_test_10.h5'
    test_view3_3 = test_path0 + 'S1L2/14_06/train_test/PETS_S1L2_1_view3_train_test_10.h5'
    test_view3_4 = test_path0 + 'S1L2/14_31/train_test/PETS_S1L2_2_view3_train_test_10.h5'

    test_GP_1 = test_path0 + 'S1L1/13_57/GP_maps/PETS_S1L1_1_groundplane_dmaps_10.h5'
    test_GP_2 = test_path0 + 'S1L1/13_59/GP_maps/PETS_S1L1_2_groundplane_dmaps_10.h5'
    test_GP_3 = test_path0 + 'S1L2/14_06/GP_maps/PETS_S1L2_1_groundplane_dmaps_10.h5'
    test_GP_4 = test_path0 + 'S1L2/14_31/GP_maps/PETS_S1L2_2_groundplane_dmaps_10.h5'

    h5file_test_GP = [test_GP_1, test_GP_2, test_GP_3, test_GP_4]

    h5file_test_view1 = [
        test_view1_1, test_view1_2, test_view1_3, test_view1_4
    ]
    h5file_test_view2 = [
        test_view2_1, test_view2_2, test_view2_3, test_view2_4
    ]
    h5file_test_view3 = [
        test_view3_1, test_view3_2, test_view3_3, test_view3_4
    ]

    val_gen = datagen_v3(
        h5file_view1=h5file_test_view1,
        h5file_view2=h5file_test_view2,
        h5file_view3=h5file_test_view3,
        h5file_GP=h5file_test_GP,
        batch_size=batch_size,
        images_per_set=images_per_set,
        patches_per_image=1,  # 1000,
        patch_dim=patch_dim[:2],
        density_scaler=scaler_stability_factor,
        image_shuffle=image_shuffle,
        patch_shuffle=patch_shuffle,
        random_state=epoch_random_state)

    # Model Training
    # Save directory
    if not os.path.exists(save_dir):
        logging.info(">>>> save dir: {}".format(save_dir))
        os.makedirs(save_dir)
    callbacks = list()
    callbacks.append(
        CSVLogger(
            filename=os.path.join(save_dir, 'train_val.csv'),
            separator=',',
            append=False,  # set to True when resuming from a checkpoint
        ))
    callbacks.append(
        ModelCheckpoint(
            filepath=os.path.join(save_dir, '{epoch:02d}-{val_loss:.4f}.h5'),
            monitor='val_loss',
            verbose=1,
            save_weights_only=True,
            save_best_only=False,
        ))
    callbacks.append(
        ModelCheckpoint(
            filepath=os.path.join(save_dir,
                                  '{epoch:02d}-{val_loss:.4f}-better.h5'),
            monitor='val_loss',
            verbose=1,
            save_weights_only=True,
            save_best_only=True,  # keeps a file for every val_loss improvement
        ))
    #callbacks.append(EarlyStopping(monitor='val_loss', patience=50, verbose=1, mode='auto'))
    # callbacks.append(TensorBoard(
    #     log_dir=os.path.join(save_dir, 'TensorBoard_info'),
    #     histogram_freq=1,
    #     write_graph=True,
    #     write_images=True,
    #     embeddings_freq=0,  # default, new feature only in latest keras and tensorflow
    #     embeddings_layer_names=None,  # default
    #     embeddings_metadata=None,  # default
    # ))
    if verbosity == 0:
        callbacks.append(
            batch_loss_callback(
                filename=os.path.join(save_dir, 'train_val_loss_batch.log'),
                append=False,  # set to True when resuming from a checkpoint
            ))

    logging.info('Begin training...')
    start_time = time.time()
    # train the network from here
    model.fit_generator(generator=train_gen,
                        steps_per_epoch=train_samples // batch_size,
                        epochs=epochs,
                        verbose=verbosity,
                        callbacks=callbacks,
                        validation_data=val_gen,
                        validation_steps=val_samples // batch_size,
                        max_q_size=20,
                        workers=1,
                        pickle_safe=False)

    # YY = model.layers[60].output
    # print(sum(YY.flatten))

    logging.info('----- {:.2f} seconds -----'.format(time.time() - start_time))
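
Assuming the usual density-map convention (the generator multiplies target densities by density_scaler so per-pixel values are not vanishingly small during training), a predicted count would be recovered by summing the output map and dividing the scaler back out. A minimal sketch with stand-in data:

import numpy as np

scaler_stability_factor = 1000  # same value the generators receive as density_scaler
pred_dmap = np.random.rand(288, 384) * 1e-2  # stand-in for a predicted density map
count = pred_dmap.sum() / scaler_stability_factor  # divide the scaler back out
print('estimated count: {:.2f}'.format(count))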