                               kernel_size=kernel_size,
                               filter_size=filter_size,
                               max_pool=max_pool,
                               dense_size=dense_size,
                               conv_type=conv_type,
                               batch_norm=batch_norm,
                               kernel_regularizer=kernel_regularizer)
    model.summary()
    key = input('Continue [Y/n]')
    if key == 'n':
        break

    # Create IDs
    model_id = "{:s}{:s}{}".format(
        str(lr).replace('.', 'e'), optimizer, int(time()))
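    # Illustrative (values assumed): with lr=0.001 and optimizer='adam',
    # str(lr).replace('.', 'e') yields '0e001', so model_id comes out like
    # '0e001adam1577836800' -- dot-free and timestamped, hence filename-safe.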
    dataset_id = ids.get_dataset_id(dataset_dir)
    print("-" * 30)
    print("{:s}".format(dataset_id))
    print("{:s}".format(model_id))
    print("-" * 30)

    final_id = data_type + scan_type + model_id + '_' + net_id
    # Write summary -------------------------------------------
    # Open the file
    cwd = os.getcwd()  # directory from which the script was invoked
    with open('{}/summary{}.txt'.format(cwd, final_id), 'w') as fh:
        # model.summary() prints to stdout by default; the print_fn callback
        # receives each summary line, so route the lines into the file handle
        model.summary(print_fn=lambda x: fh.write(x + '\n'))

    model.compile(loss='binary_crossentropy',
                  optimizer=opti,
                  metrics=['accuracy'])  # metrics assumed; the original call is truncated at this point
Example #2
import re

pfa_re = re.compile(r"P\de\d+")
dataset_id_re = re.compile(r"Tex\d+\_\d+")
data_re = re.compile(r"[a-zA-Z]+$")
fail_re = re.compile(r"aaandnadnadnan")  # deliberately unmatchable pattern, to exercise the no-match path

list_re = []
list_re.append(num_acq_re)  # num_acq_re is compiled earlier in the original script (not shown here)
list_re.append(pfa_re)
list_re.append(dataset_id_re)
list_re.append(data_re)
list_re.append(fail_re)
#for rr in list_re:
#    print(rr.search(dataset_dir).group())  # would raise AttributeError on fail_re (no match)

print(ids.get_dataset_id(dataset_dir))
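
# A minimal sketch of what ids.get_dataset_id might do, assuming it just pulls
# the 'Tex<digits>_<digits>' token out of the directory name with a pattern
# like dataset_id_re above (hypothetical implementation, for illustration):
def get_dataset_id_sketch(path):
    match = dataset_id_re.search(path)
    return match.group() if match else None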
#%%
# Two sample patch filenames; the second ('T' id) assignment overwrites the first ('FA' id)
str_in = 'B_FA566_2017_07_07_003Swath1x0_ch0_y0.png'
str_in = 'B_T66_2017_07_07_003Swath1x0_ch0_y0.png'

import re
anomaly_id = re.compile(r"\_(T|FA)\d+\_")          # e.g. '_T66_' or '_FA566_'
acq_id = re.compile(r"_\d\d\d\d\_\d\d_\d\d[^x]*")  # e.g. '_2017_07_07_003Swath1'
is_central = re.compile(r"x0\_ch0\_y0\.")          # marks the central patch (x0_ch0_y0)

list_re = []
list_re.append(anomaly_id)
list_re.append(acq_id)
list_re.append(is_central)

for rr in list_re:
    match = rr.search(str_in)
    print(match.group() if match else None)
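
# A small hypothetical helper (not part of the original script) bundling the
# three patterns into a single lookup per patch filename:
def parse_patch_name(name):
    anom = anomaly_id.search(name)
    acq = acq_id.search(name)
    return {
        'anomaly': anom.group() if anom else None,       # e.g. '_T66_'
        'acquisition': acq.group() if acq else None,     # e.g. '_2017_07_07_003Swath1'
        'central': is_central.search(name) is not None,  # True for the x0_ch0_y0 patch
    }

# parse_patch_name(str_in) -> {'anomaly': '_T66_',
#                              'acquisition': '_2017_07_07_003Swath1',
#                              'central': True}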