Example #1
from ExecutionAttributes import ExecutionAttribute


def read_attributes(path):
    print("Reading execution attributes from ", path)
    attr = ExecutionAttribute()

    with open(path) as f:
        lines = [line.rstrip('\n') for line in f]

    attr.seq = int(lines[0].split("=", 1)[1])
    attr.img_width = int(lines[1].split("=", 1)[1])
    attr.img_height = int(lines[2].split("=", 1)[1])
    attr.path = lines[3].split("=", 1)[1]
    attr.summ_basename = lines[4].split("=", 1)[1]
    attr.epochs = int(lines[5].split("=", 1)[1])
    attr.batch_size = int(lines[6].split("=", 1)[1])
    attr.train_data_dir = lines[7].split("=", 1)[1]
    attr.validation_data_dir = lines[8].split("=", 1)[1]
    attr.test_data_dir = lines[9].split("=", 1)[1]
    attr.steps_train = int(lines[10].split("=", 1)[1])
    attr.steps_valid = int(lines[11].split("=", 1)[1])
    attr.steps_test = int(lines[12].split("=", 1)[1])
    attr.architecture = lines[13].split("=", 1)[1]
    attr.curr_basename = lines[14].split("=", 1)[1]

    return attr
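
# Hedged usage sketch (not from the original project): read_attributes expects a
# plain-text file with one "key=value" pair per line, in exactly the order parsed
# above (seq, img_width, img_height, path, summ_basename, epochs, batch_size,
# train_data_dir, validation_data_dir, test_data_dir, steps_train, steps_valid,
# steps_test, architecture, curr_basename). The file name below is hypothetical.
#
# attr = read_attributes('execution_attributes.txt')
# print(attr.architecture, attr.epochs, attr.batch_size)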
Example #2
SUMMARY_BASEPATH = create_results_dir(SUMMARY_PATH, NETWORK_FORMAT, IMAGE_FORMAT)

# how many times to execute the training/validation/test cycle
CYCLES = 20

# Execution Attributes
attr = ExecutionAttribute()

# dimensions of our images.
attr.img_width, attr.img_height = 96, 96

# network parameters
# attr.path='C:/Users/hp/Downloads/cars_train'
# attr.path = '/home/amenegotto/dataset/2d/sem_pre_proc_mini/'
attr.path = '/mnt/data/image/2d/com_pre_proc/'
attr.summ_basename = get_base_name(SUMMARY_BASEPATH)
attr.s3_path = NETWORK_FORMAT + '/' + IMAGE_FORMAT
attr.epochs = 100
attr.batch_size = 128
attr.set_dir_names()

if K.image_data_format() == 'channels_first':
    input_s = (3, attr.img_width, attr.img_height)
else:
    input_s = (attr.img_width, attr.img_height, 3)

for i in range(0, CYCLES):
    # define model
    attr.model = Sequential()
    attr.model.add(Conv2D(128, (3, 3), input_shape=input_s, kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0005)))
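
    # The original example is truncated after the first convolution. What follows is a
    # hedged, generic sketch of how such a Sequential model is commonly finished
    # (activation/pooling, a dense head, and compilation); it is NOT the original
    # architecture, and it assumes Activation, MaxPooling2D, Flatten, Dense and
    # Dropout are imported from keras.layers.
    attr.model.add(Activation('relu'))
    attr.model.add(MaxPooling2D(pool_size=(2, 2)))
    attr.model.add(Flatten())
    attr.model.add(Dense(256, activation='relu'))
    attr.model.add(Dropout(0.5))
    attr.model.add(Dense(1, activation='sigmoid'))
    attr.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])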
Example #3
SUMMARY_BASEPATH = create_results_dir(SUMMARY_PATH, NETWORK_FORMAT, IMAGE_FORMAT)
INTERMEDIATE_FUSION = True
LATE_FUSION = False

# how many times to execute the training/validation/test cycle
CYCLES = 1

# Execution Attributes
attr = ExecutionAttribute()

# dimensions of our images.
attr.img_width, attr.img_height = 96, 96

# network parameters
attr.csv_path = 'csv/clinical_data.csv'
attr.path = '/mnt/data/image/2d/' + IMG_TYPE
# attr.path = '/home/amenegotto/dataset/2d/' + IMG_TYPE
attr.numpy_path = '/mnt/data/image/2d/numpy/' + IMG_TYPE
# attr.numpy_path = '/home/amenegotto/dataset/2d/numpy/' + IMG_TYPE
attr.summ_basename = get_base_name(SUMMARY_BASEPATH)
attr.epochs = 2
attr.batch_size = 32
attr.set_dir_names()

if K.image_data_format() == 'channels_first':
    input_image_s = (3, attr.img_width, attr.img_height)
else:
    input_image_s = (attr.img_width, attr.img_height, 3)

input_attributes_s = (20,)
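
# Hedged sketch (not the original model): with INTERMEDIATE_FUSION enabled, a model
# over these two input shapes is typically built with the Keras functional API by
# extracting image features, concatenating them with the 20 clinical attributes,
# and classifying the fused vector. Layer sizes below are placeholders, and the
# sketch assumes Input, Conv2D, MaxPooling2D, Flatten, Dense and concatenate are
# imported from keras.layers, and Model from keras.models.
image_input = Input(shape=input_image_s)
x = Conv2D(32, (3, 3), activation='relu')(image_input)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)

attributes_input = Input(shape=input_attributes_s)
fused = concatenate([x, attributes_input])
fused = Dense(64, activation='relu')(fused)
output = Dense(1, activation='sigmoid')(fused)
attr.model = Model(inputs=[image_input, attributes_input], outputs=output)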
Example #4
# Summary Information
SUMMARY_PATH = "/mnt/data/results"
NETWORK_FORMAT = "Unimodal"
IMAGE_FORMAT = "2D"
SUMMARY_BASEPATH = create_results_dir(SUMMARY_PATH, NETWORK_FORMAT,
                                      IMAGE_FORMAT)

# Execution Attributes
attr = ExecutionAttribute()
attr.architecture = 'InceptionV3'

results_path = create_results_dir(SUMMARY_BASEPATH, 'fine-tuning',
                                  attr.architecture)
attr.summ_basename = get_base_name(results_path)
attr.s3_path = NETWORK_FORMAT + '/' + IMAGE_FORMAT
attr.path = '/mnt/data/image/2d/sem_pre_proc'
attr.set_dir_names()
attr.batch_size = 128  # try 4, 8, 16, 32, 64, 128 or 256, depending on CPU/GPU memory capacity (powers of 2).
attr.epochs = 500

# how many times to execute the training/validation/test cycle
CYCLES = 1

for i in range(0, CYCLES):

    # create the base pre-trained model
    base_model = InceptionV3(weights='imagenet', include_top=False)

    # dimensions of our images.
    # Inception input size
    attr.img_width, attr.img_height = 299, 299
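
    # Hedged sketch of the standard Keras fine-tuning recipe (not necessarily the
    # original code): put a new classification head on top of the pre-trained base,
    # freeze the base layers, and compile. The head size and the single sigmoid
    # output are placeholders, and GlobalAveragePooling2D, Dense and Model are
    # assumed to be imported from keras.layers / keras.models.
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(1, activation='sigmoid')(x)
    attr.model = Model(inputs=base_model.input, outputs=predictions)

    # train only the newly added head first; the InceptionV3 layers stay frozen
    for layer in base_model.layers:
        layer.trainable = False

    attr.model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])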
Example #5
import os
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix, cohen_kappa_score, roc_auc_score, roc_curve
from keras.preprocessing.image import load_img, img_to_array
from keras import backend as K
from ExecutionAttributes import ExecutionAttribute

os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin'

# Execution Attributes
attr = ExecutionAttribute()

# dimensions of our images.
attr.img_width, attr.img_height = 32, 32

# network parameters
attr.path = 'C:/Users/hp/Downloads/cars_train'
attr.epochs = 200
attr.batch_size = 8
attr.set_dir_names()

if K.image_data_format() == 'channels_first':
    input_s = (3, attr.img_width, attr.img_height)
else:
    input_s = (attr.img_width, attr.img_height, 3)


def load_data(filepath):
    files = pd.read_csv(filepath)
    images = []
    prices = files[['price']]
    labels = []
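
    # The original function is truncated here. A hedged continuation (the 'image'
    # column name and the returned values are assumptions, not from the original
    # code): load each image referenced by the CSV, resize it to the configured
    # dimensions, and return the image tensors together with the prices.
    for fname in files['image']:
        img = load_img(os.path.join(attr.path, fname),
                       target_size=(attr.img_width, attr.img_height))
        images.append(img_to_array(img))

    return np.array(images), prices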
Example #6
# PURPOSE:
# Create numpy arrays with all the data needed by the multimodal networks and save them to disk
# for later reuse across the training/validation/test cycles, for faster initialization.

import numpy as np
from Datasets import create_data_as_numpy, create_data_as_list
from ExecutionAttributes import ExecutionAttribute

attr = ExecutionAttribute()

# dimensions of our images.
attr.img_width, attr.img_height = 96, 96

# network parameters
attr.csv_path = 'csv/clinical_data.csv'

is_categorical = True
image_type = 'com_pre_proc/'
npy_path = '/mnt/data/image/2d/numpy/' + image_type
# npy_path = '/home/amenegotto/dataset/2d/numpy/' + image_type
attr.path = '/mnt/data/image/2d/' + image_type
# attr.path = '/home/amenegotto/dataset/2d/' + image_type

attr.set_dir_names()

# First multimodal approach: keep the whole dataset in memory as numpy arrays (the "fat memory" approach)
# create_data_as_numpy(attr.path, attr.csv_path, attr.img_width, attr.img_height, True, npy_path)

# Third multimodal approach: save the data as lists for use by a thread-safe custom generator
create_data_as_list(attr.path, attr.csv_path, True, npy_path, is_categorical)
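
# Hedged sketch (not the project's actual generator): the thread-safe custom
# generator mentioned above is usually built on keras.utils.Sequence, which Keras
# can consume safely from multiple workers. The class below only illustrates the
# minimal shape of such a generator over pre-built image/attribute/label arrays;
# the class name and batch layout are assumptions, not part of the original code.
from keras.utils import Sequence


class MultimodalSequence(Sequence):
    def __init__(self, images, attributes, labels, batch_size):
        self.images = images
        self.attributes = attributes
        self.labels = labels
        self.batch_size = batch_size

    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.images) / self.batch_size))

    def __getitem__(self, idx):
        # return one batch: the two model inputs plus the matching labels
        batch = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return [self.images[batch], self.attributes[batch]], self.labels[batch]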