Example #1
def read_attributes(path):
    """Read execution attributes from a key=value file, one entry per line."""
    print("Reading execution attributes from", path)
    attr = ExecutionAttribute()

    # Use a context manager so the file handle is closed promptly.
    with open(path) as f:
        lines = [line.rstrip('\n') for line in f]

    # Each line is "key=value"; the fields must appear in this fixed order.
    attr.seq = int(lines[0].split("=", 1)[1])
    attr.img_width = int(lines[1].split("=", 1)[1])
    attr.img_height = int(lines[2].split("=", 1)[1])
    attr.path = lines[3].split("=", 1)[1]
    attr.summ_basename = lines[4].split("=", 1)[1]
    attr.epochs = int(lines[5].split("=", 1)[1])
    attr.batch_size = int(lines[6].split("=", 1)[1])
    attr.train_data_dir = lines[7].split("=", 1)[1]
    attr.validation_data_dir = lines[8].split("=", 1)[1]
    attr.test_data_dir = lines[9].split("=", 1)[1]
    attr.steps_train = int(lines[10].split("=", 1)[1])
    attr.steps_valid = int(lines[11].split("=", 1)[1])
    attr.steps_test = int(lines[12].split("=", 1)[1])
    attr.architecture = lines[13].split("=", 1)[1]
    attr.curr_basename = lines[14].split("=", 1)[1]

    return attr
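
The function assumes a plain-text file with one key=value pair per line, in exactly the order read above. A minimal example of such a file (the keys mirror the attribute names; all values here are invented for illustration):

seq=1
img_width=224
img_height=224
path=/mnt/data/image/2d/sem_pre_proc
summ_basename=/mnt/data/results/run-001
epochs=500
batch_size=128
train_data_dir=/mnt/data/image/2d/sem_pre_proc/train
validation_data_dir=/mnt/data/image/2d/sem_pre_proc/valid
test_data_dir=/mnt/data/image/2d/sem_pre_proc/test
steps_train=100
steps_valid=20
steps_test=20
architecture=InceptionV3
curr_basename=/mnt/data/results/run-001-current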
Example #2
# fix seed for reproducible results (only works on CPU, not GPU)
# seed = 9
# np.random.seed(seed=seed)
# tf.set_random_seed(seed=seed)

# Summary Information
SUMMARY_PATH = "/mnt/data/results"
NETWORK_FORMAT = "Unimodal"
IMAGE_FORMAT = "2D"
SUMMARY_BASEPATH = create_results_dir(SUMMARY_PATH, NETWORK_FORMAT,
                                      IMAGE_FORMAT)

# Execution Attributes
attr = ExecutionAttribute()
attr.architecture = 'InceptionV3'

results_path = create_results_dir(SUMMARY_BASEPATH, 'fine-tuning',
                                  attr.architecture)
attr.summ_basename = get_base_name(results_path)
attr.s3_path = NETWORK_FORMAT + '/' + IMAGE_FORMAT
attr.path = '/mnt/data/image/2d/sem_pre_proc'
attr.set_dir_names()
attr.batch_size = 128  # try 4, 8, 16, 32, 64, 128, 256 (powers of two), depending on CPU/GPU memory capacity
attr.epochs = 500

# how many times to execute the training/validation/test cycle
CYCLES = 1

for i in range(CYCLES):
    pass  # cycle body omitted in the original snippet
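
All of these snippets call create_results_dir and get_base_name, which are defined elsewhere in the project. A minimal sketch of plausible implementations, assuming create_results_dir joins and creates a nested results directory and get_base_name derives a timestamped file prefix inside it (signatures and behavior are assumptions, not the project's actual code):

import os
from datetime import datetime

def create_results_dir(*parts):
    # Join the path segments and make sure the directory exists.
    path = os.path.join(*parts)
    os.makedirs(path, exist_ok=True)
    return path

def get_base_name(results_path):
    # Derive a unique base name for summary files inside results_path.
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    return os.path.join(results_path, stamp)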
Example #3
# Summary Information
IMG_TYPE = "com_pre_proc/"
SUMMARY_PATH = "/mnt/data/results"
# SUMMARY_PATH="c:/temp/results"
# SUMMARY_PATH="/tmp/results"
NETWORK_FORMAT = "Multimodal"
IMAGE_FORMAT = "2D"
SUMMARY_BASEPATH = create_results_dir(SUMMARY_PATH, NETWORK_FORMAT,
                                      IMAGE_FORMAT)
INTERMEDIATE_FUSION = True
LATE_FUSION = False

# Execution Attributes
attr = ExecutionAttribute()
attr.architecture = 'vgg19'
attr.csv_path = 'csv/clinical_data.csv'
attr.s3_path = NETWORK_FORMAT + '/' + IMAGE_FORMAT
attr.numpy_path = '/mnt/data/image/2d/numpy/' + IMG_TYPE
# attr.numpy_path = '/home/amenegotto/dataset/2d/numpy/' + IMG_TYPE
attr.path = '/mnt/data/image/2d/' + IMG_TYPE

results_path = create_results_dir(SUMMARY_BASEPATH, 'fine-tuning',
                                  attr.architecture)
attr.summ_basename = get_base_name(results_path)
attr.set_dir_names()
attr.batch_size = 128
attr.epochs = 500

attr.img_width = 224
attr.img_height = 224
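
Example #3 configures a multimodal run: image features from a CNN are combined with clinical data from a CSV, either mid-network (INTERMEDIATE_FUSION) or at the decision level (LATE_FUSION). A minimal Keras sketch of the intermediate-fusion idea, concatenating VGG19 image features with a clinical feature vector (the layer sizes and the clinical input width are assumptions for illustration, not the project's actual architecture):

from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import Input, Dense, Flatten, Concatenate
from tensorflow.keras.models import Model

# Image branch: VGG19 convolutional base (input shape assumed to match attr.img_width/height).
image_input = Input(shape=(224, 224, 3))
base = VGG19(weights='imagenet', include_top=False, input_tensor=image_input)
image_features = Flatten()(base.output)

# Clinical branch: a flat vector of tabular features (width of 10 is illustrative).
clinical_input = Input(shape=(10,))
clinical_features = Dense(32, activation='relu')(clinical_input)

# Intermediate fusion: concatenate both feature sets before the classifier head.
fused = Concatenate()([image_features, clinical_features])
hidden = Dense(256, activation='relu')(fused)
output = Dense(2, activation='softmax')(hidden)

model = Model(inputs=[image_input, clinical_input], outputs=output)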
Example #4
# Summary Information
SUMMARY_PATH = "/mnt/data/fine-tuning/Xception"
# SUMMARY_PATH = "c:/temp/results"
# SUMMARY_PATH = "/tmp/results"
NETWORK_FORMAT = "Unimodal"
IMAGE_FORMAT = "2D"
SUMMARY_BASEPATH = create_results_dir(SUMMARY_PATH, NETWORK_FORMAT, IMAGE_FORMAT)

# how many times to execute the training/validation/test cycle
CYCLES = 1

# Execution Attributes
INITIAL_EPOCH = 1
attr = ExecutionAttribute()
attr.architecture = 'Xception'

results_path = create_results_dir('/mnt/data', 'fine-tuning', attr.architecture)
attr.summ_basename = get_base_name(results_path)
attr.path = '/mnt/data/image/2d/com_pre_proc'
attr.set_dir_names()
attr.batch_size = 64  # try 4, 8, 16, 32, 64, 128, 256 (powers of two), depending on CPU/GPU memory capacity
attr.epochs = 15

# hyperparameters for the model
nb_classes = 2  # number of classes
based_model_last_block_layer_number = 126  # depends on the base model selected
attr.img_width, attr.img_height = 299, 299  # change based on the shape/structure of your images
learn_rate = 1e-4  # SGD learning rate
momentum = .9  # SGD momentum, which helps escape local minima
transformation_ratio = .05  # how aggressive the data augmentation/transformations will be
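
These hyperparameters suggest a standard fine-tuning setup: freeze everything below the last block of the pretrained Xception base, then train the rest with SGD. A minimal Keras sketch of that wiring (the classifier head and compile call are assumptions; only the frozen-layer cutoff, image size, and optimizer settings come from the values above):

from tensorflow.keras.applications import Xception
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD

# Pretrained base; 299x299 matches attr.img_width/attr.img_height above.
base_model = Xception(weights='imagenet', include_top=False,
                      input_shape=(attr.img_width, attr.img_height, 3))

# Freeze all layers up to the last block; only the top of the network trains.
for layer in base_model.layers[:based_model_last_block_layer_number]:
    layer.trainable = False

# Small classification head (assumed; not part of the original snippet).
x = GlobalAveragePooling2D()(base_model.output)
predictions = Dense(nb_classes, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer=SGD(learning_rate=learn_rate, momentum=momentum),
              loss='categorical_crossentropy', metrics=['accuracy'])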