Example #1
    lrate = LearningRateScheduler(_step_decay)

    # sgd optimizer with lr multipliers
    multisgd = MultiSGD(lr=base_lr,
                        momentum=momentum,
                        decay=0.0,
                        nesterov=False,
                        lr_mult=lr_multipliers)

    loss_funcs = get_loss_funcs(batch_size)
    model.compile(loss=loss_funcs, optimizer=multisgd, metrics=["accuracy"])

    load_any_weights(model, multigpu=args.gpus > 1)

    df = get_dataflow(
        annot_path='%s/annotations/person_keypoints_%s2017.json' %
        (DATA_DIR, args.dataset),
        img_dir='%s/%s2017/' % (DATA_DIR, args.dataset))
    train_samples = df.size()
    print('Collected %d train samples...' % train_samples)
    train_df = batch_dataflow(df,
                              batch_size,
                              time_steps=args.time_steps,
                              format=__format)
    train_gen = gen(train_df)

    print(model.inputs[0].get_shape())
    # print(model.outputs)

    from snapshot import Snapshot

    model.fit_generator(train_gen,
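Example #1 breaks off in the middle of the fit_generator call. A minimal completion sketch: steps_per_epoch follows from the sample count collected above, while the epoch count and the callback list are placeholders, not values from the original script.

    model.fit_generator(train_gen,
                        steps_per_epoch=train_samples // batch_size,
                        epochs=100,          # placeholder epoch count
                        callbacks=[lrate],   # a Snapshot(...) callback could be appended here
                        verbose=1)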
Example #2
    annot_path_train = os.path.join(curr_dir, '../dataset/annotations/person_keypoints_train2017.json')
    img_dir_train = os.path.abspath(os.path.join(curr_dir, '../dataset/train2017/'))
    annot_path_val = os.path.join(curr_dir, '../dataset/annotations/person_keypoints_val2017.json')
    img_dir_val = os.path.abspath(os.path.join(curr_dir, '../dataset/val2017/'))

    # get dataflow of samples from the training and validation sets (the validation set is used for training as well)

    coco_data_train = COCODataPaths(
        annot_path=annot_path_train,
        img_dir=img_dir_train
    )
    coco_data_val = COCODataPaths(
        annot_path=annot_path_val,
        img_dir=img_dir_val
    )
    df = get_dataflow([coco_data_train, coco_data_val])
    train_samples = df.size()

    # get generator of batches

    batch_df = batch_dataflow(df, batch_size)
    train_gen = gen(batch_df)

    # setup lr multipliers for conv layers

    lr_multipliers = get_lr_multipliers(model)

    # configure callbacks
    iterations_per_epoch = train_samples // batch_size
    _step_decay = partial(step_decay,
                          iterations_per_epoch=iterations_per_epoch)
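step_decay itself is never defined in these snippets; it is bound with functools.partial and then wrapped in LearningRateScheduler, which calls it with the epoch index. A hedged sketch of such a schedule, with every constant a placeholder rather than a value taken from the examples:

    import math

    def step_decay(epoch, iterations_per_epoch):
        # hypothetical schedule: exponential step decay expressed in training
        # iterations; all three constants below are placeholders
        initial_lr = 4e-5
        gamma = 0.333
        stepsize = 100000
        iterations = epoch * iterations_per_epoch
        return initial_lr * math.pow(gamma, math.floor(iterations / stepsize))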
Example #3
    last_epoch = restore_weights(weights_best_file, model)

    # prepare generators

    curr_dir = os.path.dirname(__file__)
    train_annot_path = os.path.join(
        curr_dir, '../dataset/my_keypoints_final_train.json')
    train_img_dir = os.path.abspath(
        os.path.join(curr_dir, '../dataset/train_tracking_data/'))
    val_annot_path = os.path.join(curr_dir,
                                  '../dataset/my_keypoints_final_test.json')
    val_img_dir = os.path.abspath(
        os.path.join(curr_dir, '../dataset/train_tracking_data/'))
    # get dataflow of samples

    df1 = get_dataflow(annot_path=train_annot_path, img_dir=train_img_dir)
    train_samples = df1.size()
    df2 = get_dataflow(annot_path=val_annot_path, img_dir=val_img_dir)
    val_samples = df2.size()
    # get generator of batches

    batch_df1 = batch_dataflow(df1, batch_size)
    train_gen = gen(batch_df1)
    batch_df2 = batch_dataflow(df2, batch_size)
    val_gen = gen(batch_df2)
    # setup lr multipliers for conv layers

    lr_multipliers = get_lr_multipliers(model)

    # configure callbacks
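get_lr_multipliers and MultiSGD come from the project itself and are only called in these snippets, never defined. A rough sketch of what such a helper might return (the layer selection and factors are assumptions); the resulting dict is what Example #1 passes to MultiSGD via lr_mult=lr_multipliers:

    from keras.layers import Conv2D

    def get_lr_multipliers(model):
        # hypothetical helper: map weight names to per-weight learning-rate
        # multipliers; the stage name and factors below are placeholders
        lr_mult = {}
        for layer in model.layers:
            if isinstance(layer, Conv2D):
                factor = 4.0 if 'stage1' in layer.name else 1.0
                for w in layer.weights:
                    lr_mult[w.name] = factor
        return lr_mult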
Example #4
    # restore weights
    last_epoch = restore_weights(weights_best_file, model)

    # print the model summary
    # model.summary()

    # prepare generators
    # curr_dir = os.path.dirname(__file__)
    # annot_path = os.path.join(curr_dir, '../dataset/annotations/person_keypoints_train2017.json')
    # img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/train2017/'))
    annot_path = "/media/han/E/mWork/datasets/COCO2017/annotations/person_keypoints_val2017.json"
    img_dir = '/media/han/E/mWork/datasets/COCO2017/val2017/'

    # get dataflow of samples
    df = get_dataflow(  # dataset loading and preprocessing (mask, augmentation, heatmap, PAF, etc.), multithreaded
        annot_path=annot_path,
        img_dir=img_dir)
    train_samples = df.size()

    # get generator of batches
    batch_df = batch_dataflow(df, batch_size)
    train_gen = gen(batch_df)

    # setup lr multipliers for conv layers
    lr_multipliers = get_lr_multipliers(model)

    # configure callbacks
    iterations_per_epoch = train_samples // batch_size
    _step_decay = partial(step_decay,
                          iterations_per_epoch=iterations_per_epoch)
    lrate = LearningRateScheduler(_step_decay)
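gen is another project helper that is never shown. Assuming batch_dataflow returns a tensorpack-style DataFlow whose datapoints are already split into (inputs, outputs) lists, a minimal wrapper might look like this; fit_generator requires the generator to loop forever:

    def gen(df):
        # hypothetical wrapper: iterate over the dataflow endlessly
        df.reset_state()
        while True:
            for inputs, outputs in df.get_data():
                yield inputs, outputs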
Example #5
    model = get_training_model(weight_decay)

    # restore weights

    last_epoch = restore_weights(weights_best_file, model)

    # prepare generators

    curr_dir = os.path.dirname(__file__)
    annot_path = os.path.join(
        curr_dir, '../dataset/annotations/person_keypoints_train2017.json')
    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/train2017/'))

    # get dataflow of samples

    df = get_dataflow(annot_path=annot_path, img_dir=img_dir)
    train_samples = df.size()

    # get generator of batches

    batch_df = batch_dataflow(df, batch_size)
    train_gen = gen(batch_df)

    # setup lr multipliers for conv layers

    lr_multipliers = get_lr_multipliers(model)

    # configure callbacks

    iterations_per_epoch = train_samples // batch_size
    _step_decay = partial(step_decay,
                          iterations_per_epoch=iterations_per_epoch)
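restore_weights is also project-specific; the snippets only show that it loads weights_best_file into the model and that its return value is used as the last completed epoch. A heavily hedged sketch of that pattern, where the epoch-tracking file is purely a placeholder mechanism:

    import os

    def restore_weights(weights_best_file, model, epoch_file='last_epoch.txt'):
        # hypothetical helper: load the best checkpoint if it exists and return
        # the epoch to resume from; epoch_file is a placeholder, not the
        # original project's mechanism
        last_epoch = 0
        if os.path.exists(weights_best_file):
            model.load_weights(weights_best_file)
            if os.path.exists(epoch_file):
                with open(epoch_file) as f:
                    last_epoch = int(f.read().strip())
        return last_epoch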
Example #6
    # get the model

    model = get_training_model(weight_decay)

    # restore weights

    last_epoch = restore_weights(weights_best_file, model)

    # prepare generators

    curr_dir = os.path.dirname(__file__)
    annot_path = os.path.join(
        curr_dir, '../dataset/annotations/person_keypoints_train2017.json')
    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/train2017/'))
    df = get_dataflow(annot_path=annot_path,
                      img_dir=img_dir,
                      batch_size=batch_size)
    train_gen = df.get_data()
    train_samples = df.size()

    # setup lr multipliers for conv layers

    lr_multipliers = get_lr_multipliers(model)

    # configure callbacks

    iterations_per_epoch = train_samples // batch_size
    _step_decay = partial(step_decay,
                          iterations_per_epoch=iterations_per_epoch)
    lrate = LearningRateScheduler(_step_decay)
    checkpoint = ModelCheckpoint(weights_best_file,
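The last snippet breaks off inside the ModelCheckpoint call. A plausible completion sketch; the monitor and save options are assumptions, not the original arguments:

    checkpoint = ModelCheckpoint(weights_best_file,
                                 monitor='loss',           # assumption: monitor training loss
                                 verbose=0,
                                 save_best_only=False,
                                 save_weights_only=True,
                                 mode='min',
                                 period=1)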