img_file_source = os.path.join(dir_path, f)

            # NOTE(review): this span is the interior of a directory-walk
            # loop whose header is outside this chunk; the indentation of
            # the first line appears mangled by extraction.
            # Split path into directory/filename, then filename/extension.
            (filedir, tempfilename) = os.path.split(img_file_source)
            (filename, extension) = os.path.splitext(tempfilename)

            # Skip anything that is not a recognised image file.
            if extension.upper() not in [
                    '.BMP', '.PNG', '.JPG', '.JPEG', '.TIFF', '.TIF'
            ]:
                print('file ext name:', f)
                continue

            # The class label is inferred from the directory name:
            # .../dr1_db/0 or .../zgh/0 -> label 0; .../1 -> label 1.
            # A path matching neither pattern writes no row at all.
            if ('/dr1_db/0' in dir_path) or ('/zgh/0' in dir_path):
                csv_writer.writerow([img_file_source, 0])

            if ('/dr1_db/1' in dir_path) or ('/zgh/1' in dir_path):
                csv_writer.writerow([img_file_source, 1])

# Split the labelled CSV (filename_csv — built above, outside this chunk)
# into 85% train / 15% validation and persist each partition to its own CSV.
from LIBS.DataPreprocess import my_data
train_files, train_labels, valid_files, valid_labels = my_data.split_dataset(
    filename_csv, valid_ratio=0.15, random_state=1111)
from LIBS.DataPreprocess.my_data import write_images_labels_csv
# NOTE(review): sub_class_no is presumably defined earlier in the full
# file — verify against the complete source.
write_images_labels_csv(train_files,
                        train_labels,
                        filename_csv='Subclass_' + str(sub_class_no) +
                        '_train.csv')
write_images_labels_csv(valid_files,
                        valid_labels,
                        filename_csv='Subclass_' + str(sub_class_no) +
                        '_valid.csv')

print('OK')
def train(model):
    """Train the optic-disc segmentation model in two phases.

    Phase 1 trains only the network heads (10 epochs at
    ``config.LEARNING_RATE1``); phase 2 fine-tunes all layers
    (40 epochs at ``config.LEARNING_RATE2``).  Both phases use the
    same imgaug augmentation pipeline.

    Depends on module-level ``os``, ``iaa``, ``config`` and
    ``OpticDiscDataset`` being in scope (defined elsewhere in the file).

    :param model: Mask R-CNN style model exposing a ``train(...)`` method.
    """
    datafile_type = 'dataset3'  # including ROP data
    filename_csv = os.path.join(os.path.abspath('..'), 'optic_disc',
                                'datafiles', datafile_type, 'images_masks.csv')

    from LIBS.DataPreprocess.my_data import split_dataset
    # 90% train / 10% validation split over (image, mask) file pairs.
    (train_image_files, train_mask_files,
     valid_image_files, valid_mask_files) = split_dataset(
         filename_csv,
         valid_ratio=0.1,
         random_state=2223,
         field_columns=['images', 'masks'])

    # Build and prepare the training dataset.
    dataset_train = OpticDiscDataset()
    dataset_train.load_OpticDisc(train_image_files)
    dataset_train.prepare()

    # Build and prepare the validation dataset.
    dataset_val = OpticDiscDataset()
    dataset_val.load_OpticDisc(valid_image_files)
    dataset_val.prepare()

    # region image augmentation
    def usually(aug):
        # Apply the wrapped augmenter to 96% of the images.
        return iaa.Sometimes(0.96, aug)

    imgaug_seq = iaa.Sequential([
        iaa.Fliplr(0.5),  # horizontal flip, 50% of images
        iaa.Flipud(0.2),  # vertical flip, 20% of images
        iaa.Crop(px=(0, 20)),  # crop each side by 0-20 px
        usually(
            iaa.Affine(
                # mild zoom in/out per axis
                scale={"x": (0.92, 1.08), "y": (0.92, 1.08)},
                # translate by -8% to +8% per axis
                translate_percent={"x": (-0.08, 0.08), "y": (-0.08, 0.08)},
                rotate=(0, 360),  # arbitrary rotation angle
            )),
    ])
    # endregion

    print("Training network heads")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE1,
                epochs=10,
                augmentation=imgaug_seq,
                layers='heads')

    print("Train all layers")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE2,
                epochs=40,
                augmentation=imgaug_seq,
                layers='all')
Beispiel #3
0
from LIBS.CNN_Models.my_loss.my_metrics import *
from LIBS.DataPreprocess.my_images_generator_seg import my_Generator_seg
from LIBS.CNN_Models.my_multi_gpu import ModelMGPU

#region initial setting
# Mini-batch sizes for the training / validation generators.
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_VALID = 16

TRAIN_TYPE = 'BloodVessel384'
model_save_dir = '/tmp5/' + TRAIN_TYPE  # checkpoint output directory

DATFILE_TYPE = 'dataset1'
csv_file = os.path.join(os.path.abspath('..'), 'datafiles', DATFILE_TYPE,
                        TRAIN_TYPE + '.csv')
# 90% train / 10% validation split over (image, mask) file pairs.
# NOTE(review): split_dataset is presumably imported earlier in the full
# file — verify.
train_image_files, train_mask_files, valid_image_files, valid_mask_files = \
    split_dataset(csv_file, valid_ratio=0.1, field_columns=['images', 'masks'])

img_size = 384
image_shape = (img_size, img_size, 3)  # 3-channel input
#endregion

TRAIN_TIMES = 5  # number of independent training repetitions

for i in range(TRAIN_TIMES):

    #region data generator
    from imgaug import augmenters as iaa
    # Apply the wrapped augmenter to 96% of the images.
    sometimes = lambda aug: iaa.Sometimes(0.96, aug)
    # NOTE(review): this iaa.Sequential is truncated in this chunk — its
    # remaining augmenters and closing bracket lie beyond the last
    # visible line.
    imgaug_train_seq = iaa.Sequential([
        # iaa.CropAndPad(percent=(-0.04, 0.04)),
        iaa.Fliplr(0.5),  # horizontally flip 50% of the images
Beispiel #4
0
# Directory of pre-processed (384 px) images, organised by eye side.
dir_preprocess = '/media/ubuntu/data1/糖网项目/眼别/preprocess384'

# NOTE(review): TRAIN_TYPE, DATA_VERSION and my_data are presumably
# defined/imported earlier in the full file — verify.
filename_csv = os.path.join(os.path.abspath('..'), 'datafiles', TRAIN_TYPE,
                            'split_patid_{}.csv'.format(DATA_VERSION))
# Map directory-name substrings to class labels (keys are Chinese for
# "left eye" / "right eye"); match_type='partial' matches the key
# anywhere in the path.
dict_mapping = {'左眼': 0, '右眼': 1}
my_data.write_csv_based_on_dir(filename_csv,
                               dir_preprocess,
                               dict_mapping,
                               match_type='partial')

#endregion

#region split dataset

# Three-way split: 10% validation, 15% test, remainder train.
train_files, train_labels, valid_files, valid_labels, test_files, test_labels = \
    my_data.split_dataset(filename_csv, valid_ratio=0.1, test_ratio=0.15)
filename_csv_train = os.path.join(
    os.path.abspath('..'), 'datafiles', TRAIN_TYPE,
    'split_patid_train_{}.csv'.format(DATA_VERSION))
filename_csv_valid = os.path.join(
    os.path.abspath('..'), 'datafiles', TRAIN_TYPE,
    'split_patid_valid_{}.csv'.format(DATA_VERSION))
filename_csv_test = os.path.join(
    os.path.abspath('..'), 'datafiles', TRAIN_TYPE,
    'split_patid_test_{}.csv'.format(DATA_VERSION))

# Persist each partition to its own CSV file.
my_data.write_images_labels_csv(train_files, train_labels, filename_csv_train)
my_data.write_images_labels_csv(valid_files, valid_labels, filename_csv_valid)
my_data.write_images_labels_csv(test_files, test_labels, filename_csv_test)
# NOTE(review): the triple-quoted block below is commented-out code that
# continues past the end of this chunk (the string is not closed here).
'''
filename_pkl_train = os.path.join(os.path.abspath('.'),
Beispiel #5
0
            # NOTE(review): this span is the interior of a CSV-row loop
            # whose header is outside this chunk.  Rows without a patient
            # id are copied through with a blank patient-id column.
            if patient_id == '':
                images = row["images"]
                labels = row["labels"]
                csv_writer.writerow([images, labels, ' '])


if OP_PAT_ID_SPLIT:
    # Re-write the source CSV grouped by patient id, then split it into
    # train / validation partitions without shuffling.
    filename_csv_result = os.path.abspath(
        os.path.join(sys.path[0], "..", 'datafiles/2020_2_29',
                     'DLP_patient_based_split.csv'))
    convert_patient_id(filename_csv, filename_csv_result)

    # Input dataset is ordered by patient_id, so do not shuffle.
    train_files, train_labels, valid_files, valid_labels = my_data.split_dataset(
        filename_csv_result,
        valid_ratio=0.15,
        shuffle=False,
        random_state=2223)

    # Persist the train partition.
    filename_csv_train = os.path.join(os.path.abspath('..'), 'datafiles',
                                      datafile_type,
                                      'DLP_patient_based_split_train.csv')
    my_data.write_images_labels_csv(train_files,
                                    train_labels,
                                    filename_csv=filename_csv_train)
    # Persist the validation partition.
    filename_csv_valid = os.path.join(os.path.abspath('..'), 'datafiles',
                                      datafile_type,
                                      'DLP_patient_based_split_valid.csv')
    my_data.write_images_labels_csv(valid_files,
                                    valid_labels,
                                    filename_csv=filename_csv_valid)