Example #1
import datetime
import os

# project-specific helpers assumed importable: tools, train_model, unet
def train_xy_init_model(num_classes, epochs):
    # first get the time
    now = datetime.datetime.now()
    now_str = now.strftime('%Y-%m-%d_%H-%M-%S')
    print("Started at {}".format(now_str))
    # then create a new model folder based on name and date
    model_name = "XY-Unet-Init"
    rootDir = "C:/users/jfauser/IPCAI2019/ModelData/" + model_name + "/"
    out_dir = rootDir + now_str + "/"
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    # compile the net architecture (512x512 single-channel inputs)
    batch_size = 16
    input_shape = (512, 512)
    model = unet(input_shape + (1,), num_classes)

    # load the two training sets
    print("loading data set 1")
    if num_classes == 2:
        images_1, labels_1 = tools.get_data_set_for_chorda(root_dir +
                                                           "dataset1.h5")
    else:
        images_1, labels_1 = tools.get_data_set(root_dir + "dataset1.h5")

    print("loading data set 2")
    if num_classes == 2:
        images_2, labels_2 = tools.get_data_set_for_chorda(root_dir +
                                                           "dataset2.h5")
    else:
        images_2, labels_2 = tools.get_data_set(root_dir + "dataset2.h5")

    # and train
    filename_weights1 = out_dir + "weights1.hdf5"
    filename_weights2 = out_dir + "weights2.hdf5"

    print("train model 1")
    train_model.train_model([images_1, labels_1], filename_weights1, model,
                            batch_size, epochs)  # train on data set 1
    del model  # discard the trained model before building a fresh one

    print("train model 2")
    model = unet(input_shape + (1,), num_classes)  # fresh, untrained weights
    train_model.train_model([images_2, labels_2], filename_weights2, model,
                            batch_size, epochs)  # train on data set 2
    del model

    return now_str
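
A minimal driver sketch, assuming the project modules above are importable and the HDF5 data sets exist under the hard-coded model folder; num_classes=3 and epochs=50 are made-up placeholder values:

# Hypothetical call site; adjust num_classes and epochs to your data.
if __name__ == "__main__":
    run_timestamp = train_xy_init_model(num_classes=3, epochs=50)
    print("weights written to run folder {}".format(run_timestamp))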
Example #2
def get_regularized_constant():
    regularization_list = []
    data_set = get_data_set(add_extra_feature=True)
    # grid-search C over 2^-10 .. 2^9
    for c in (2 ** power for power in range(-10, 10)):
        print("C: %f" % c)
        regularization_list.append((cross_validation(c, data_set), c))
    regularization_list.sort()
    print(regularization_list)
    # lowest cross-validation error sorts first; return that (error_rate, C) pair
    return regularization_list[0]
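
A possible call site; the unpacking below just names the two members of the returned tuple:

# Hypothetical usage of the grid search above.
best_error, best_C = get_regularized_constant()
print("best C = %g (error %.5f)" % (best_C, best_error))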
Example #3
def get_regularized_constant(kernel_func=lambda x, y: x.dot(y.T)):
    regularization_list = []
    data_set = get_data_set()
    # note: range(0, 1) yields only power = 0, so the single value C = 1 is tried
    for c in (10 ** power for power in range(0, 1)):
        print(c)
        regularization_list.append((cross_validation(c, data_set, kernel_func), c))
    regularization_list.sort()
    print(regularization_list)
    return regularization_list[0]
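
The default kernel_func is a plain linear kernel (x.dot(y.T)). A sketch of passing a Gaussian (RBF) kernel instead; the gaussian_kernel factory and its gamma value are assumptions for illustration:

import numpy as np

# Hypothetical RBF kernel factory; returns K(x, y) = exp(-gamma * ||x - y||^2)
# computed between every row of x and every row of y.
def gaussian_kernel(gamma=0.1):
    def kernel(x, y):
        dists = ((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=-1)
        return np.exp(-gamma * dists)
    return kernel

best = get_regularized_constant(kernel_func=gaussian_kernel(gamma=0.1))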
Example #4
def main():
    """
    The best result comes from the polynomial kernel with d = 1 and C = 100:
    mean error_rate: 0.00526

    The polynomial kernel with d = 3 and C = 1000 also gives a decent result:
    mean error_rate: 0.06316

    Gaussian kernels work pretty badly; I haven't found any (gamma, C) pair
    with a decent error rate.

    To repeat the C and kernel analysis, uncomment analyze_C_and_kernels().
    """
    # analyze_C_and_kernels()

    C = 100
    print("error_rate mean: %0.5f" % cross_validation(C, get_data_set(),
                                                      polynominal_kernel(1)))