Example #1
def merge_set(voc_set_file, sbd_set_file, output_file):
    voc_set_list = get_data_list(voc_set_file, shuffle=False)
    sbd_set_list = get_data_list(sbd_set_file, shuffle=False)

    # use set() to remove duplicate items
    output_list = list(set(voc_set_list + sbd_set_list))
    output_list.sort()

    # save merged list; use a context manager and avoid shadowing the
    # output_file argument with the file handle
    with open(output_file, 'w') as f:
        for image_id in output_list:
            f.write(image_id)
            f.write('\n')
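Every example here depends on the repo-local get_data_list() helper. A minimal sketch of what it presumably does, inferred from the call sites (one sample id per line in a txt file, shuffled by default, which is why the merge code passes shuffle=False):

import random

def get_data_list(data_file, shuffle=True):
    # one sample id per line; drop blank lines and surrounding whitespace
    with open(data_file) as f:
        data_list = [line.strip() for line in f if line.strip()]
    # shuffling by default is an assumption, based on the explicit shuffle=False above
    if shuffle:
        random.shuffle(data_list)
    return data_list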
Example #2
def main():
    parser = argparse.ArgumentParser(
        argument_default=argparse.SUPPRESS,
        description='evaluate segmentation mIOU from groundtruth and model predict label images')
    '''
    Command line options
    '''
    parser.add_argument('--dataset_file',
                        type=str,
                        required=True,
                        help='eval samples txt file')

    parser.add_argument('--gt_label_path',
                        type=str,
                        required=True,
                        help='path containing groundtruth label png file')

    parser.add_argument('--pred_label_path',
                        type=str,
                        required=True,
                        help='path containing model predict label png file')

    parser.add_argument('--classes_path',
                        type=str,
                        required=False,
                        default='configs/voc_classes.txt',
                        help='path to class definitions, default=%(default)s')

    parser.add_argument(
        '--model_output_shape',
        type=str,
        help='model mask output size as <height>x<width>, default=%(default)s',
        default='512x512')

    parser.add_argument('--show_background',
                        default=False,
                        action="store_true",
                        help='Show background evaluation info')

    args = parser.parse_args()

    # param parse
    height, width = args.model_output_shape.split('x')
    model_output_shape = (int(height), int(width))

    # add background class
    class_names = get_classes(args.classes_path)
    assert len(class_names) < 254, 'PNG image label only supports less than 254 classes.'
    class_names = ['background'] + class_names

    # get dataset list
    dataset = get_data_list(args.dataset_file)

    start = time.time()
    eval_mIOU(dataset, args.gt_label_path, args.pred_label_path, class_names,
              model_output_shape, args.show_background)
    end = time.time()
    print("Evaluation time cost: {:.6f}s".format(end - start))
Example #3
def main():
    parser = argparse.ArgumentParser(
        argument_default=argparse.SUPPRESS,
        description='TF 2.x post training integer quantization converter')

    parser.add_argument('--keras_model_file',
                        required=True,
                        type=str,
                        help='path to keras model file')
    parser.add_argument(
        '--dataset_path',
        required=True,
        type=str,
        help='dataset path containing images and label png file')
    parser.add_argument('--dataset_file',
                        required=True,
                        type=str,
                        help='data samples txt file')
    parser.add_argument(
        '--sample_num',
        type=int,
        help='annotation sample number to feed the converter, default=%(default)s',
        default=30)
    parser.add_argument(
        '--model_input_shape',
        type=str,
        help='model image input shape as <height>x<width>, default=%(default)s',
        default='512x512')
    parser.add_argument('--output_file',
                        required=True,
                        type=str,
                        help='output tflite model file')

    args = parser.parse_args()
    height, width = args.model_input_shape.split('x')
    model_input_shape = (int(height), int(width))

    # get dataset list
    dataset = get_data_list(args.dataset_file)

    post_train_quant_convert(args.keras_model_file, args.dataset_path, dataset,
                             args.sample_num, model_input_shape,
                             args.output_file)
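post_train_quant_convert() is also repo-local. A minimal sketch of the TF 2.x post-training quantization flow it presumably wraps, using only the public tf.lite API (the representative-dataset generator is a placeholder that should yield preprocessed input batches):

import tensorflow as tf

def post_train_quant_sketch(keras_model_file, representative_dataset_gen, output_file):
    # compile=False avoids needing custom loss/metric objects at load time
    model = tf.keras.models.load_model(keras_model_file, compile=False)
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    # default optimization enables quantization, calibrated on representative samples
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset_gen
    tflite_model = converter.convert()
    with open(output_file, 'wb') as f:
        f.write(tflite_model)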
Example #4
def label_stat(label_path, dataset_file, class_names):
    if not os.path.isdir(label_path):
        raise ValueError('Input path does not exist!\n')

    if dataset_file:
        # get dataset sample list
        dataset = get_data_list(dataset_file)
        png_files = [os.path.join(label_path, image_id.strip()+'.png') for image_id in dataset]
    else:
        png_files = glob.glob(os.path.join(label_path, '*.png'))

    num_classes = len(class_names)
    # count class item number
    class_count = OrderedDict([(item, 0) for item in class_names])
    valid_number = 0

    pbar = tqdm(total=len(png_files), desc='Labels checking')
    for png_file in png_files:
        label_array = np.array(Image.open(png_file))
        # treat all the invalid label value as background
        label_array[label_array>(num_classes-1)] = 0

        # count object class for statistic
        label_list = list(np.unique(label_array))
        if sum(label_list) > 0:
            valid_number += 1
        for label in label_list:
            class_name = class_names[label]
            class_count[class_name] += 1
        pbar.update(1)

    pbar.close()
    # show item number statistic
    print('Image number for each class:')
    for (class_name, number) in class_count.items():
        if class_name == 'background':
            continue
        print('%s: %d' % (class_name, number))
    print('total number of valid label images:', valid_number)
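A hypothetical invocation of label_stat(), with placeholder paths; the background class is prepended to match the label value convention used by the other scripts:

class_names = ['background'] + get_classes('configs/voc_classes.txt')
label_stat('VOCdevkit/VOC2012/SegmentationClass', 'trainval.txt', class_names)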
Example #5
def main():
    parser = argparse.ArgumentParser(
        argument_default=argparse.SUPPRESS,
        description='evaluate Deeplab model (h5/pb/tflite/mnn) with test dataset')
    '''
    Command line options
    '''
    parser.add_argument('--model_path',
                        type=str,
                        required=True,
                        help='path to model file')

    parser.add_argument(
        '--dataset_path',
        type=str,
        required=True,
        help='dataset path containing images and label png file')

    parser.add_argument('--dataset_file',
                        type=str,
                        required=True,
                        help='eval samples txt file')

    parser.add_argument('--classes_path',
                        type=str,
                        required=False,
                        default='configs/voc_classes.txt',
                        help='path to class definitions, default=%(default)s')

    parser.add_argument(
        '--model_input_shape',
        type=str,
        help='model image input size as <height>x<width>, default=%(default)s',
        default='512x512')

    parser.add_argument('--do_crf',
                        action="store_true",
                        help='whether to add CRF postprocess for model output',
                        default=False)

    parser.add_argument('--show_background',
                        default=False,
                        action="store_true",
                        help='Show background evaluation info')

    parser.add_argument(
        '--save_result',
        default=False,
        action="store_true",
        help='Save the segmentation result image in result/segmentation dir')

    args = parser.parse_args()

    # param parse
    height, width = args.model_input_shape.split('x')
    model_input_shape = (int(height), int(width))

    # add background class to match model & GT
    class_names = get_classes(args.classes_path)
    assert len(class_names) < 254, 'PNG image label only supports less than 254 classes.'
    class_names = ['background'] + class_names

    model, model_format = load_eval_model(args.model_path)

    # get dataset list
    dataset = get_data_list(args.dataset_file)

    start = time.time()
    eval_mIOU(model, model_format, args.dataset_path, dataset, class_names,
              model_input_shape, args.do_crf, args.save_result,
              args.show_background)
    end = time.time()
    print("Evaluation time cost: {:.6f}s".format(end - start))
Example #6
def main(args):
    log_dir = 'logs/000/'
    # get class info, add background class to match model & GT
    class_names = get_classes(args.classes_path)
    assert len(class_names) < 254, 'PNG image label only supports less than 254 classes.'
    class_names = ['background'] + class_names
    num_classes = len(class_names)

    # callbacks for training process
    monitor = 'Jaccard'

    tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=False, write_grads=False, write_images=False, update_freq='batch')
    checkpoint = ModelCheckpoint(os.path.join(log_dir, 'ep{epoch:03d}-loss{loss:.3f}-Jaccard{Jaccard:.3f}-val_loss{val_loss:.3f}-val_Jaccard{val_Jaccard:.3f}.h5'),
        monitor='val_{}'.format(monitor),
        mode='max',
        verbose=1,
        save_weights_only=False,
        save_best_only=True,
        period=1)

    reduce_lr = ReduceLROnPlateau(monitor='val_{}'.format(monitor), factor=0.5, mode='max',
                patience=5, verbose=1, cooldown=0, min_lr=1e-6)
    early_stopping = EarlyStopping(monitor='val_{}'.format(monitor), min_delta=0, patience=100, verbose=1, mode='max')
    terminate_on_nan = TerminateOnNaN()

    callbacks = [tensorboard, checkpoint, reduce_lr, early_stopping, terminate_on_nan]


    # get train&val dataset
    dataset = get_data_list(args.dataset_file)
    if args.val_dataset_file:
        val_dataset = get_data_list(args.val_dataset_file)
        num_train = len(dataset)
        num_val = len(val_dataset)
        dataset.extend(val_dataset)
    else:
        val_split = args.val_split
        num_val = int(len(dataset)*val_split)
        num_train = len(dataset) - num_val

    # prepare train&val data generator
    train_generator = SegmentationGenerator(args.dataset_path, dataset[:num_train],
                                            args.batch_size,
                                            num_classes,
                                            resize_shape=args.model_input_shape[::-1],
                                            crop_shape=None,
                                            weighted_type=args.weighted_type,
                                            augment=True,
                                            do_ahisteq=False)

    valid_generator = SegmentationGenerator(args.dataset_path, dataset[num_train:],
                                            args.batch_size,
                                            num_classes,
                                            resize_shape=args.model_input_shape[::-1],
                                            crop_shape=None,
                                            weighted_type=args.weighted_type,
                                            augment=False,
                                            do_ahisteq=False)


    # prepare online evaluation callback
    if args.eval_online:
        eval_callback = EvalCallBack(args.dataset_path, dataset[num_train:], class_names, args.model_input_shape, args.model_pruning, log_dir, eval_epoch_interval=args.eval_epoch_interval, save_eval_checkpoint=args.save_eval_checkpoint)
        callbacks.append(eval_callback)

    # prepare optimizer
    #optimizer = Adam(lr=7e-4, epsilon=1e-8, decay=1e-6)
    optimizer = get_optimizer(args.optimizer, args.learning_rate, decay_type=None)

    # prepare loss according to loss type & weighted type
    if args.weighted_type == 'balanced':
        classes_weights_path = os.path.join(args.dataset_path, 'classes_weights.txt')
        if os.path.isfile(classes_weights_path):
            weights = load_class_weights(classes_weights_path)
        else:
            weights = calculate_weigths_labels(train_generator, num_classes, save_path=args.dataset_path)
        losses = WeightedSparseCategoricalCrossEntropy(weights)
        sample_weight_mode = None
    elif args.weighted_type == 'adaptive':
        losses = sparse_crossentropy
        sample_weight_mode = 'temporal'
    elif args.weighted_type is None:
        losses = sparse_crossentropy
        sample_weight_mode = None
    else:
        raise ValueError('invalid weighted_type {}'.format(args.weighted_type))

    if args.loss == 'focal':
        warnings.warn("Focal loss doesn't support weighted class balance, will ignore related config")
        losses = softmax_focal_loss
        sample_weight_mode = None
    elif args.loss == 'crossentropy':
        # using crossentropy will keep the weighted type setting
        pass
    else:
        raise ValueError('invalid loss type {}'.format(args.loss))

    # prepare metric
    #metrics = {'pred_mask' : [Jaccard, sparse_accuracy_ignoring_last_label]}
    metrics = {'pred_mask' : Jaccard}

    # support multi-gpu training
    if args.gpu_num >= 2:
        # devices_list=["/gpu:0", "/gpu:1"]
        devices_list=["/gpu:{}".format(n) for n in range(args.gpu_num)]
        strategy = tf.distribute.MirroredStrategy(devices=devices_list)
        print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
        with strategy.scope():
            # get multi-gpu train model
            model = get_deeplabv3p_model(args.model_type, num_classes, args.model_input_shape, args.output_stride, args.freeze_level, weights_path=args.weights_path)
            # compile model
            model.compile(optimizer=optimizer, sample_weight_mode=sample_weight_mode,
                          loss=losses, metrics=metrics)
    else:
        # get normal train model
        model = get_deeplabv3p_model(args.model_type, num_classes, args.model_input_shape, args.output_stride, args.freeze_level, weights_path=args.weights_path)
        # compile model
        model.compile(optimizer=optimizer, sample_weight_mode=sample_weight_mode,
                      loss=losses, metrics=metrics)
    model.summary()

    # Transfer stage: train some epochs with frozen layers first, to get a stable loss.
    initial_epoch = args.init_epoch
    epochs = initial_epoch + args.transfer_epoch
    print("Transfer training stage")
    print('Train on {} samples, val on {} samples, with batch size {}, input_shape {}.'.format(num_train, num_val, args.batch_size, args.model_input_shape))
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=len(train_generator),
                        validation_data=valid_generator,
                        validation_steps=len(valid_generator),
                        epochs=epochs,
                        initial_epoch=initial_epoch,
                        verbose=1,
                        workers=1,
                        use_multiprocessing=False,
                        max_queue_size=10,
                        callbacks=callbacks)

    # Wait 2 seconds for next stage
    time.sleep(2)

    if args.decay_type:
        # rebuild optimizer to apply learning rate decay, but only
        # after unfreezing all layers
        callbacks.remove(reduce_lr)
        steps_per_epoch = max(1, len(train_generator))
        decay_steps = steps_per_epoch * (args.total_epoch - args.init_epoch - args.transfer_epoch)
        optimizer = get_optimizer(args.optimizer, args.learning_rate, decay_type=args.decay_type, decay_steps=decay_steps)

    # Unfreeze the whole network for further tuning
    # NOTE: more GPU memory is required after unfreezing the body
    print("Unfreeze and continue training, to fine-tune.")
    if args.gpu_num >= 2:
        with strategy.scope():
            for layer in model.layers:
                layer.trainable = True
            model.compile(optimizer=optimizer, sample_weight_mode=sample_weight_mode,
                          loss=losses, metrics=metrics) # recompile to apply the change

    else:
        for layer in model.layers:
            layer.trainable = True
        model.compile(optimizer=optimizer, sample_weight_mode=sample_weight_mode,
                      loss=losses, metrics=metrics) # recompile to apply the change

    print('Train on {} samples, val on {} samples, with batch size {}, input_shape {}.'.format(num_train, num_val, args.batch_size, args.model_input_shape))
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=len(train_generator),
                        validation_data=valid_generator,
                        validation_steps=len(valid_generator),
                        epochs=args.total_epoch,
                        initial_epoch=epochs,
                        verbose=1,
                        workers=1,
                        use_multiprocessing=False,
                        max_queue_size=10,
                        callbacks=callbacks)

    # Finally store model
    model.save(os.path.join(log_dir, 'trained_final.h5'))
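get_optimizer() is repo-local as well. A minimal sketch of how the decay_steps computed above could drive a Keras learning-rate schedule, assuming a 'cosine' decay_type (the function name and signature here are illustrative, not the repo's API):

import tensorflow as tf

def get_optimizer_sketch(learning_rate, decay_type=None, decay_steps=1):
    # wrap the base learning rate in a schedule when decay is requested
    if decay_type == 'cosine':
        learning_rate = tf.keras.optimizers.schedules.CosineDecay(
            initial_learning_rate=learning_rate, decay_steps=decay_steps)
    return tf.keras.optimizers.Adam(learning_rate=learning_rate)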