Example #1
0
    # Fragment of a training-driver body (enclosing function signature not visible).
    # Build an image regularizer that resizes every sample to a fixed 200x200.
    regularizer = reg.Regularizer()
    regularizer.fixresize(200, 200)
    if createdataset:
        # Optionally (re)build the dataset on disk before training.
        create_dataset(savepath=path, im_regularizer=regularizer)

    # Formatting presumably drops samples below the `minconf` confidence
    # threshold — TODO confirm against confidence_filtered_pb_format.
    formatting = confidence_filtered_pb_format(minconf)
    generator = DatasetManager(train_samples=train_samples,
                               valid_samples=valid_samples,
                               batch_size=batch_size,
                               dataset_dir=path,
                               formatting=formatting)
    # Derive class weights from the training split (presumably to counter
    # class imbalance — verify against create_weights).
    gt = generator.train()
    cw = create_weights(gt)

    # No augmentation or normalization configured: empty processing plan.
    data_processing_plan = ProcessingPlan()

    # Train a simple RGB palm/back binary classifier; the trained model is
    # saved under resources/models/palm_back/<name>.
    model1 = train_model(model_generator=lambda: simple_classifier_rgb(weight_decay=weight_decay),
                         dataset_manager=generator,
                         loss='binary_crossentropy',
                         learning_rate=learning_rate,
                         patience=patience,
                         data_processing_plan=data_processing_plan,
                         tb_path='palm_back/',
                         model_name=name,
                         model_path=resources_path(os.path.join("models", "palm_back", name)),
                         epochs=epochs,
                         enable_telegram_log=False,
                         class_weight=cw)


Example #2
0
    # Plan the processing needed before providing inputs and outputs for
    # training and validation: random hue/sat/value shifts as augmentation,
    # plus optional normalization of the image input.
    data_processing_plan = ProcessingPlan(
        augmenter=Augmenter().shift_hue(augmentation_prob).shift_sat(
            augmentation_prob).shift_val(augmentation_prob),
        regularizer=Regularizer().normalize() if normalize else None,
        keyset={IN('img')})
    # MobileNet-style preprocessing on the image input; the 255* rescale
    # assumes images arrive in [0, 1] — TODO confirm upstream range.
    data_processing_plan.add_outer(key=IN('img'),
                                   fun=lambda x: preprocess_mobile(255 * x))

    # Input shape is inferred from the first training image of the manager.
    model = train_model(
        model_generator=lambda: regressor_2(input_shape=np.shape(dm.train()[0][
            IN('img')][0])),
        dataset_manager=dm,
        loss={OUT('heats'): loss},
        learning_rate=learning_rate,
        patience=patience,
        data_processing_plan=data_processing_plan,
        tb_path="joints",
        # TensorBoard plots: ground-truth vs. predicted joint skeletons.
        tb_plots={
            'target':
            lambda feed: joint_skeleton_regressor(
                feed, img_key=IN('img'), joints_key=OUT('heats')),
            'output':
            lambda feed: joint_skeleton_regressor(
                feed, img_key=IN('img'), joints_key=NET_OUT('heats'))
        },
        # NOTE(review): `model` is passed as the run name and then rebound to
        # the trained model by this assignment — confirm the enclosing
        # function's `model` parameter is a name string.
        model_name=model,
        model_path=joint_locators_path(),
        epochs=epochs,
        enable_telegram_log=False)
Example #3
0
                                          .shift_sat(augmentation_prob)
                                          .shift_val(augmentation_prob),
                                          regularizer=Regularizer().normalize() if normalize else None,
                                          keyset={IN('img')})
    # MobileNet-style preprocessing on the image input; the 255* rescale
    # assumes images arrive in [0, 1] — TODO confirm upstream range.
    data_processing_plan.add_outer(key=IN('img'), fun=lambda x: preprocess_mobile(255*x))

    # Train a MobileNet transfer-learning joint locator. Commented-out kwargs
    # are kept as a record of previously tried configurations.
    model = train_model(model_generator=lambda: transfer_mobile_net_joints(# input_shape=np.shape(dm.train()[0][IN('img')][0]),
                                                                          dropout_rate=drate,
                                                                          train_mobilenet=retrain,
                                                                          # activation=K.layers.LeakyReLU(alpha=leaky_slope)
                                                                          ),
                        dataset_manager=dm,
                        # Two heads: heat-map regression plus per-joint
                        # visibility classification.
                        loss={OUT('heats'): heatmap_loss,
                              OUT('vis'): 'binary_crossentropy'},
                        learning_rate=learning_rate,
                        patience=patience,
                        data_processing_plan=data_processing_plan,
                        tb_path="joints",
                        # TensorBoard plots: ground truth vs. network output.
                        tb_plots={'target': lambda feed: joint_skeleton_impression(feed,
                                                                                   img_key=IN('img'),
                                                                                   heats_key=OUT('heats'),
                                                                                   vis_key=OUT('vis')),
                                  'output': lambda feed: joint_skeleton_impression(feed,
                                                                                   img_key=IN('img'),
                                                                                   heats_key=NET_OUT('heats'),
                                                                                   vis_key=NET_OUT('vis')),
                                  },
                        # NOTE(review): `model` doubles as the run name before being
                        # rebound to the trained model — verify it is a string here.
                        model_name=model,
                        model_path=joint_locators_path(),
                        epochs=epochs,
                        enable_telegram_log=True)
    # Plan the processing needed before providing inputs and outputs for
    # training and validation: hue/sat/value augmentation and optional
    # normalization, applied to the single image input only.
    data_processing_plan = ProcessingPlan(augmenter=Augmenter().shift_hue(augmentation_prob)
                                          .shift_sat(augmentation_prob)
                                          .shift_val(augmentation_prob),
                                          regularizer=Regularizer().normalize() if normalize else None,
                                          keyset={IN(0)})  # Today we just need to augment one input...
    # Train a heat-map cropper; input shape is inferred from the first
    # training batch (leading batch dimension stripped by [1:]).
    model1 = train_model(model_generator=lambda: normalized_convs(input_shape=np.shape(generator.train()[0][IN(0)])[1:],
                                                                  dropout_rate=drate,
                                                                  weight_decay=weight_decay,
                                                                  activation=lambda: K.layers.LeakyReLU(alpha=leaky_slope)),
                         dataset_manager=generator,
                         # Heat-map loss with a false-positive penalty tuned by
                         # white_priority/delta (semantics defined elsewhere — verify).
                         loss={OUT(0): lambda x, y: prop_heatmap_penalized_fp_loss(x, y,
                                                                                   white_priority=white_priority,
                                                                                   delta=delta)
                               },
                         learning_rate=learning_rate,
                         patience=patience,
                         data_processing_plan=data_processing_plan,
                         tb_path="heat_maps/",
                         # TensorBoard plots: raw input/target/output tensors, the
                         # input overlaid with the predicted mask, and crop sprites.
                         tb_plots={'plain_input': lambda feed: feed[IN(0)],
                                   'plain_target': lambda feed: feed[OUT(0)],
                                   'plain_output': lambda feed: feed[NET_OUT(0)],
                                   'combined_mask': lambda feed: get_image_with_mask(feed[IN(0)],
                                                                                     feed[NET_OUT(0)]),
                                   'crops': crop_sprite},
                         # NOTE(review): `model` is used as the run name — confirm
                         # it is a string in the enclosing (unseen) signature.
                         model_name=model,
                         model_path=croppers_path(),
                         epochs=epochs,
                         enable_telegram_log=True)
Example #5
0
 # Train a MobileNet-based transfer-learning heat-map cropper. Commented-out
 # kwargs are kept as a record of previously tried configurations.
 model1 = train_model(
     model_generator=lambda:
     transfer_mobile_net(  # input_shape=np.shape(generator.train()[0][IN(0)])[1:],
         # weight_decay=weight_decay,
         # train_vgg=retrain_vgg_model,
         train_mobilenet=retrain_vgg_model,
         dropout_rate=drate),
     dataset_manager=generator,
     # Heat-map loss with a false-positive penalty tuned by
     # white_priority/delta (semantics defined elsewhere — verify).
     loss={
         OUT(0):
         lambda x, y: prop_heatmap_penalized_fp_loss(
             x, y, white_priority=white_priority, delta=delta)
     },
     learning_rate=learning_rate,
     patience=patience,
     data_processing_plan=data_processing_plan,
     tb_path="heat_maps/",
     # TensorBoard plots: raw input/target/output tensors, the input overlaid
     # with the predicted mask, and crop sprites.
     tb_plots={
         'plain_input':
         lambda feed: feed[IN(0)],
         'plain_target':
         lambda feed: feed[OUT(0)],
         'plain_output':
         lambda feed: feed[NET_OUT(0)],
         'combined_mask':
         lambda feed: get_image_with_mask(feed[IN(0)], feed[NET_OUT(0)]),
         'crops':
         crop_sprite
     },
     # NOTE(review): `model` is used as the run name — confirm it is a string
     # in the enclosing (unseen) signature.
     model_name=model,
     model_path=croppers_path(),
     epochs=epochs,
     enable_telegram_log=True)