def read_attributes(path):
    print("Reading execution attributes from ", path)

    attr = ExecutionAttribute()

    # Each line of the attributes file is expected to hold one "key=value" pair,
    # in the fixed order parsed below.
    with open(path) as f:
        lines = [line.rstrip('\n') for line in f]

    attr.seq = int(lines[0].split("=", 1)[1])
    attr.img_width = int(lines[1].split("=", 1)[1])
    attr.img_height = int(lines[2].split("=", 1)[1])
    attr.path = lines[3].split("=", 1)[1]
    attr.summ_basename = lines[4].split("=", 1)[1]
    attr.epochs = int(lines[5].split("=", 1)[1])
    attr.batch_size = int(lines[6].split("=", 1)[1])
    attr.train_data_dir = lines[7].split("=", 1)[1]
    attr.validation_data_dir = lines[8].split("=", 1)[1]
    attr.test_data_dir = lines[9].split("=", 1)[1]
    attr.steps_train = int(lines[10].split("=", 1)[1])
    attr.steps_valid = int(lines[11].split("=", 1)[1])
    attr.steps_test = int(lines[12].split("=", 1)[1])
    attr.architecture = lines[13].split("=", 1)[1]
    attr.curr_basename = lines[14].split("=", 1)[1]

    return attr
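# Usage sketch for read_attributes (the file path and values below are hypothetical
# examples, not from the original project). The parser takes everything after the
# first '=' on each line, so a matching attributes file would look like:
#
#   seq=1
#   img_width=96
#   img_height=96
#   path=/mnt/data/image/2d/com_pre_proc/
#   ...
#   architecture=vgg19
#   curr_basename=20190101-0001-1
#
# attr = read_attributes('/mnt/data/results/attributes.txt')
# print(attr.architecture, attr.epochs, attr.batch_size)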
SUMMARY_PATH = "/mnt/data/results" # SUMMARY_PATH="c:/temp/results" # SUMMARY_PATH="/tmp/results" NETWORK_FORMAT = "Unimodal" IMAGE_FORMAT = "2D" SUMMARY_BASEPATH = create_results_dir(SUMMARY_PATH, NETWORK_FORMAT, IMAGE_FORMAT) # how many times to execute the training/validation/test cycle CYCLES = 20 # # Execution Attributes attr = ExecutionAttribute() # dimensions of our images. attr.img_width, attr.img_height = 96, 96 # network parameters # attr.path='C:/Users/hp/Downloads/cars_train' # attr.path='/home/amenegotto/dataset/2d/sem_pre_proc_mini/ attr.path = '/mnt/data/image/2d/com_pre_proc/' attr.summ_basename = get_base_name(SUMMARY_BASEPATH) attr.s3_path = NETWORK_FORMAT + '/' + IMAGE_FORMAT attr.epochs = 100 attr.batch_size = 128 attr.set_dir_names() if K.image_data_format() == 'channels_first': input_s = (3, attr.img_width, attr.img_height) else: input_s = (attr.img_width, attr.img_height, 3)
attr.path = '/mnt/data/image/2d/sem_pre_proc'
attr.set_dir_names()
attr.batch_size = 128  # try 4, 8, 16, 32, 64, 128, 256 depending on CPU/GPU memory capacity (powers of 2).
attr.epochs = 500

# how many times to execute the training/validation/test cycle
CYCLES = 1

for i in range(0, CYCLES):

    # create the base pre-trained model
    base_model = InceptionV3(weights='imagenet', include_top=False)

    # dimensions of our images (Inception input size)
    attr.img_width, attr.img_height = 299, 299

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    drop = Dropout(0.20)(x)
    # and a logistic layer -- we have 2 classes
    predictions = Dense(2, activation='softmax')(drop)

    # this is the model we will train
    attr.model = Model(inputs=base_model.input, outputs=predictions)

    # first: train only the top layers (which were randomly initialized)
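    # A possible continuation, following the standard Keras transfer-learning recipe
    # (an assumption -- the original block is truncated here): freeze the convolutional
    # base so only the newly added top layers are trained, then compile.
    for layer in base_model.layers:
        layer.trainable = False

    attr.model.compile(optimizer='rmsprop',
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])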
attr = ExecutionAttribute()
attr.architecture = 'vgg19'
attr.csv_path = 'csv/clinical_data.csv'
attr.s3_path = NETWORK_FORMAT + '/' + IMAGE_FORMAT
attr.numpy_path = '/mnt/data/image/2d/numpy/' + IMG_TYPE
# attr.numpy_path = '/home/amenegotto/dataset/2d/numpy/' + IMG_TYPE
attr.path = '/mnt/data/image/2d/' + IMG_TYPE

results_path = create_results_dir(SUMMARY_BASEPATH, 'fine-tuning', attr.architecture)
attr.summ_basename = get_base_name(results_path)
attr.set_dir_names()
attr.batch_size = 128
attr.epochs = 500
attr.img_width = 224
attr.img_height = 224

input_attributes_s = (20, )

# how many times to execute the training/validation/test cycle
CYCLES = 1

for i in range(0, CYCLES):

    # Load the VGG model
    vgg_conv = VGG19(weights='imagenet', include_top=False, input_shape=(attr.img_width, attr.img_height, 3))

    # Freeze the layers except the last 4 layers
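    # A possible continuation (an assumption -- the block above ends mid-step): freeze
    # every VGG19 layer except the last four, as the comment above describes.
    for layer in vgg_conv.layers[:-4]:
        layer.trainable = False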
from keras.optimizers import RMSprop, Adam
from keras import backend as K
from ExecutionAttributes import ExecutionAttribute
import cv2
import os
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix, cohen_kappa_score, roc_auc_score, roc_curve
from keras.preprocessing.image import load_img, img_to_array

os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'

# Execution Attributes
attr = ExecutionAttribute()

# dimensions of our images.
attr.img_width, attr.img_height = 32, 32

# network parameters
attr.path = 'C:/Users/hp/Downloads/cars_train'
attr.epochs = 200
attr.batch_size = 8

attr.set_dir_names()

if K.image_data_format() == 'channels_first':
    input_s = (3, attr.img_width, attr.img_height)
else:
    input_s = (attr.img_width, attr.img_height, 3)


def load_data(filepath):
    files = pd.read_csv(filepath)
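    # A possible continuation (an assumption -- the original function is truncated here,
    # and the CSV column names 'filename'/'label' are hypothetical): load every image
    # listed in the CSV and stack images and labels into numpy arrays.
    images, labels = [], []
    for _, row in files.iterrows():
        img = load_img(row['filename'], target_size=(attr.img_width, attr.img_height))
        images.append(img_to_array(img))
        labels.append(row['label'])
    return np.array(images), np.array(labels)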
# Execution Attributes
INITIAL_EPOCH = 1

attr = ExecutionAttribute()
attr.architecture = 'Xception'

results_path = create_results_dir('/mnt/data', 'fine-tuning', attr.architecture)
attr.summ_basename = get_base_name(results_path)
attr.path = '/mnt/data/image/2d/com_pre_proc'
attr.set_dir_names()
attr.batch_size = 64  # try 4, 8, 16, 32, 64, 128, 256 depending on CPU/GPU memory capacity (powers of 2).
attr.epochs = 15

# hyper parameters for the model
nb_classes = 2  # number of classes
based_model_last_block_layer_number = 126  # value depends on the chosen base model
attr.img_width, attr.img_height = 299, 299  # change based on the shape/structure of your images
learn_rate = 1e-4  # sgd learning rate
momentum = .9  # sgd momentum to avoid local minimum
transformation_ratio = .05  # how aggressive the data augmentation/transformation will be

if K.image_data_format() == 'channels_first':
    input_s = (1, attr.img_width, attr.img_height)
else:
    input_s = (attr.img_width, attr.img_height, 1)

# define model
# attr.model = load_model(attr.summ_basename + '-mid-ckweights.h5')

callbacks = [EarlyStopping(monitor='val_loss', patience=5, mode='min', restore_best_weights=True),
             ModelCheckpoint(attr.summ_basename + "-ckweights.h5", mode='min', verbose=1, monitor='val_loss', save_best_only=True)]
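# Sketch of how these callbacks would typically be wired into training (an assumption --
# the fit call is not part of this excerpt, and train_generator/validation_generator
# are hypothetical names):
#
# history = attr.model.fit_generator(
#     train_generator,
#     steps_per_epoch=attr.steps_train,
#     epochs=attr.epochs,
#     initial_epoch=INITIAL_EPOCH,
#     validation_data=validation_generator,
#     validation_steps=attr.steps_valid,
#     callbacks=callbacks)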