Example 1
0
    # get dataflow of samples from training set and validation set (we use validation set for training as well)
    # NOTE(review): this is a fragment — the enclosing function definition and the
    # definitions of COCODataPaths / get_dataflow / batch_dataflow / gen /
    # get_lr_multipliers / step_decay are not visible here.

    # Wrap the annotation file + image directory for each split.
    coco_data_train = COCODataPaths(
        annot_path=annot_path_train,
        img_dir=img_dir_train
    )
    coco_data_val = COCODataPaths(
        annot_path=annot_path_val,
        img_dir=img_dir_val
    )
    # Single combined dataflow over both splits; its size is the number of
    # training samples per epoch.
    df = get_dataflow([coco_data_train, coco_data_val])
    train_samples = df.size()

    # get generator of batches

    batch_df = batch_dataflow(df, batch_size)
    train_gen = gen(batch_df)

    # setup lr multipliers for conv layers

    lr_multipliers = get_lr_multipliers(model)

    # configure callbacks
    #/////////////////////////////////////////////////////////////////
    iterations_per_epoch = train_samples // batch_size
    # Pre-bind iterations_per_epoch so step_decay only needs the argument(s)
    # supplied by the scheduler when it fires.
    _step_decay = partial(step_decay,
                          iterations_per_epoch=iterations_per_epoch
                          )
    lrate = LearningRateScheduler(_step_decay)
    checkpoint = ModelCheckpoint(weights_best_file, monitor='loss',
                                 verbose=0, save_best_only=False,
Example 2
0
        curr_dir, '../dataset/my_keypoints_final_train.json')
    train_img_dir = os.path.abspath(
        os.path.join(curr_dir, '../dataset/train_tracking_data/'))
    val_annot_path = os.path.join(curr_dir,
                                  '../dataset/my_keypoints_final_test.json')
    val_img_dir = os.path.abspath(
        os.path.join(curr_dir, '../dataset/train_tracking_data/'))
    # get dataflow of samples
    # NOTE(review): fragment — the enclosing function and the helpers
    # (get_dataflow / batch_dataflow / gen / get_lr_multipliers) are not visible.

    df1 = get_dataflow(annot_path=train_annot_path, img_dir=train_img_dir)
    train_samples = df1.size()
    # NOTE(review): the validation dataflow is built with train_img_dir, not
    # val_img_dir — likely a copy-paste slip, though both variables point to the
    # same directory above, so behavior is unchanged here; confirm intent.
    df2 = get_dataflow(annot_path=val_annot_path, img_dir=train_img_dir)
    val_samples = df2.size()
    # get generator of batches

    batch_df1 = batch_dataflow(df1, batch_size)
    train_gen = gen(batch_df1)
    batch_df2 = batch_dataflow(df2, batch_size)
    val_gen = gen(batch_df2)
    # setup lr multipliers for conv layers

    lr_multipliers = get_lr_multipliers(model)

    # configure callbacks

    iterations_per_epoch = train_samples // batch_size
    # Quarter the learning rate (factor=0.25) once val_loss has plateaued for
    # 30 checks (patience=30).
    lrate = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.25,
                              patience=30,
                              mode='auto')
    checkpoint = ModelCheckpoint(weights_best_file,