# Command-line options for the evaluation run.
parser.add_argument('--batch_size', help='The training batch size.', type=int, default=5)
parser.add_argument('--weights', help='The path of weights to be loaded.', type=str, default=None)

args = parser.parse_args()

# Resolve the project's checkpoint/weights directories relative to the CWD.
paths = check_related_path(os.getcwd())

# Only the test split is needed here; train/validation names are discarded.
_, _, _, _, test_image_names, test_label_names = get_dataset_info(args.dataset)

# Construct the segmentation network from the CLI configuration.
net, base_model = builder(
    args.num_classes, (args.crop_height, args.crop_width), args.model, args.base_model)

# Print the architecture overview.
net.summary()

# Load weights: fall back to the default checkpoint when --weights is absent.
print('Loading the weights...')
if args.weights is None:
    # NOTE(review): 'weigths_path' (misspelled) is presumably the exact key
    # produced by check_related_path — verify before "fixing" the spelling.
    net.load_weights(filepath=os.path.join(
        paths['weigths_path'], '{model}_based_on_{base_model}.h5'.format(
            model=args.model, base_model=base_model)))
else:
    # Bug fix: a user-supplied --weights path was previously silently ignored;
    # load it, mirroring the training script's behavior.
    net.load_weights(args.weights)
# ---------------------------------------------------------------------------
# Example 2: training-script variant
# ---------------------------------------------------------------------------
# Command-line options for the training run.
parser.add_argument('--data_aug_rate',
                    help='The rate of data augmentation.',
                    type=float,
                    default=0.)
parser.add_argument('--checkpoint_freq',
                    help='How often to save a checkpoint.',
                    type=int,
                    default=5)
parser.add_argument('--validation_freq',
                    help='How often to perform validation.',
                    type=int,
                    default=1)
parser.add_argument('--num_valid_images',
                    help='The number of images used for validation.',
                    type=int,
                    default=20)
parser.add_argument('--data_shuffle',
                    help='Whether to shuffle the data.',
                    type=str2bool,
                    default=True)
parser.add_argument('--random_seed',
                    help='The random shuffle seed.',
                    type=int,
                    default=None)
parser.add_argument('--weights',
                    help='The path of weights to be loaded.',
                    type=str,
                    default=None)
parser.add_argument('--steps_per_epoch',
                    help='The training steps of each epoch',
                    type=int,
                    default=None)

args = parser.parse_args()

# Resolve the project's checkpoint/weights directories relative to the CWD.
paths = check_related_path(os.getcwd())

# Only the train/validation splits are used here; the test split is discarded.
(train_image_names, train_label_names,
 valid_image_names, valid_label_names, _, _) = get_dataset_info(args.dataset)

# Construct the segmentation network from the CLI configuration.
net, base_model = builder(args.num_classes,
                          (args.crop_height, args.crop_width),
                          args.model,
                          args.base_model)

# Print the architecture overview.
net.summary()

# Restore pretrained weights only when a checkpoint path was supplied.
weights_path = args.weights
if weights_path is not None:
    print('Loading the weights...')
    net.load_weights(weights_path)

# Map CLI loss names to loss callables.
# 'CE' is used directly; 'Focal_Loss' is a factory invoked once here.
losses = dict(CE=categorical_crossentropy_with_logits,
              Focal_Loss=focal_loss())