Example 1
        print("No test data")
        exit()
    if 'vision_path' in dir() and vision_path is not None and not os.access(
            vision_path, os.F_OK):
        os.makedirs(vision_path)
    if os.access(evaluation_path, os.F_OK):
        shutil.rmtree(evaluation_path)
    os.makedirs(evaluation_path)

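    # inference graph: a batch of cubic regions goes in, softmax predictions come out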
    x = tf.placeholder(tf.float32,
                       [None, REGION_SIZE, REGION_SIZE, REGION_SIZE])
    x_image = tf.reshape(x, [-1, REGION_SIZE, REGION_SIZE, REGION_SIZE, 1])
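    # batch-normalization statistics were pre-computed; load them for inference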
    bn_params = np.load(bn_file)
    outputs, _, _ = tft.volume_bndo_flbias_l5_30(
        x_image,
        dropout_rate=0.0,
        batch_normalization_statistic=False,
        bn_params=bn_params)
    prediction_out = outputs['sm_out']
    saver = tf.train.Saver()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
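    # restore the trained weights before evaluation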
    saver.restore(sess, net_file)

    start_time = time.time()
    #patient_evaluations = open(evaluation_path + "/patient_evaluations.log", "w")
    results = []
    CPMs = []
    CPMs2 = []
    test_patients = all_patients[4:5]
negative_val_indices = list(range(negative_val_num))

#random.shuffle(positive_train_indices)
#random.shuffle(positive_val_indices)

# network construction
bn_params = np.load(net_init_path + "/batch_normalization_statistic1.npy")
volume_input = tf.placeholder(tf.float32,
                              [None, REGION_SIZE, REGION_SIZE, REGION_SIZE])
volume_reshape = tf.reshape(volume_input,
                            [-1, REGION_SIZE, REGION_SIZE, REGION_SIZE, 1])
real_label = tf.placeholder(tf.float32, [None, 2])
#r_bn1, b_bn1, w_conv1, w_conv2, out_conv1, out_bn1, hidden_conv1, hidden_conv2, hidden_conv3, out_fc1, out_fc2, softmax_out = tft.volume_bnnet2_l6_56(volume_reshape)
net_outs, _, _ = tft.volume_bndo_flbias_l6_40(
    volume_reshape,
    dropout_rate=0.0,
    batch_normalization_statistic=False,
    bn_params=bn_params)
out_fc2 = net_outs['last_out']
softmax_out = net_outs['sm_out']
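# batch accuracy: compare the predicted class (argmax of the softmax) with the one-hot label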
correct_prediction = tf.equal(tf.argmax(softmax_out, 1),
                              tf.argmax(real_label, 1))
batch_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

extract_volumes = ft.partial(mt.extract_volumes,
                             volume_shape=np.int_(
                                 [REGION_SIZE, REGION_SIZE, REGION_SIZE]),
                             centering=CENTERING,
                             scale_augment=SCALE_AUGMENTATION,
                             translation_augment=TRANSLATION_AUGMENTATION,
                             rotation_augment=ROTATION_AUGMENTATION)
#negative_importances = 1000*np.ones(shape=[num_negative], dtype=float)

# network construction
volume_input = tf.placeholder(tf.float32,
                              [None, REGION_SIZE, REGION_SIZE, REGION_SIZE])
volume_reshape = tf.reshape(volume_input,
                            [-1, REGION_SIZE, REGION_SIZE, REGION_SIZE, 1])
real_label = tf.placeholder(tf.float32, [None, 2])
#r_bn1, b_bn1, w_conv1, w_conv2, out_conv1, out_bn1, hidden_conv1, hidden_conv2, hidden_conv3, out_fc1, out_fc2, softmax_out = tft.volume_bnnet2_l6_56(volume_reshape)
#bn_params = np.load(net_init_path + "/batch_normalization_statistic.npy")
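# fraction of augmented positives among all samples; presumably used as the
# positive-class prior for the final-layer bias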
positive_confidence = num_positive_aug / float(num_positive_aug +
                                               num_positive * np_proportion)
net_outs, _, bn_params = tft.volume_bndo_flbias_l6_40(
    volume_reshape,
    True,
    positive_confidence,
    dropout_rate=0.0,
    batch_normalization_statistic=True,
    bn_params=None)
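# with batch_normalization_statistic=True the net also returns its BN
# statistics, so they can be saved for later inference runs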
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    logits=net_outs['last_out'], labels=real_label)
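# alpha-balancing term: positive_confidence when real_label[:, 0] == 0,
# (1 - positive_confidence) when it is 1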
balancing = real_label[:, 0] + tf.pow(tf.constant(-1, dtype=tf.float32),
                                      real_label[:, 0]) * tf.constant(
                                          positive_confidence, tf.float32)
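# focusing term (1 - p_t)^2 of the focal loss, with p_t the predicted
# probability of the true class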
modulation = tf.pow(
    real_label[:, 0] +
    tf.pow(tf.constant(-1, dtype=tf.float32), real_label[:, 0]) *
    net_outs['sm_out'][:, 0], tf.constant(2, dtype=tf.float32))
focal_loss = modulation * balancing * cross_entropy
batch_loss = tf.reduce_mean(focal_loss)
correct_prediction = tf.equal(tf.argmax(net_outs['sm_out'], 1),
                              tf.argmax(real_label, 1))
Example 4
dfile_storage.close()

val_num = int(num_total * VALIDATION_RATE)
#positive_val_num = 1
train_num = num_total - val_num
train_indices = fileindices[:train_num]
val_indices = fileindices[train_num:]

# network construction
volume_input = tf.placeholder(tf.float32,
                              [None, REGION_SIZE, REGION_SIZE, REGION_SIZE])
volume_reshape = tf.reshape(volume_input,
                            [-1, REGION_SIZE, REGION_SIZE, REGION_SIZE, 1])
real_label = tf.placeholder(tf.float32, [None, 2])
#r_bn1, b_bn1, w_conv1, w_conv2, out_conv1, out_bn1, hidden_conv1, hidden_conv2, hidden_conv3, out_fc1, out_fc2, softmax_out = tft.volume_bnnet2_l6_56(volume_reshape)
out_fc2, softmax_out = tft.volume_bnnet_flbias_l6_56(volume_reshape)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=out_fc2,
                                                        labels=real_label)
#pm = tf.pow(tf.constant(-1, dtype=tf.float32), real_label[:,0])
#prediction_pm = tf.multiply(pm, softmax_out[:,0])
#addition = tf.add(real_label[:,0], prediction_pm)
#modulation = tf.pow(addition, tf.constant(2, dtype=tf.float32))
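# the four commented-out steps above collapse into this single expression:
# (label + (-1)^label * p)^2, i.e. the (1 - p_t)^2 focal term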
modulation = tf.pow(
    tf.add(
        real_label[:, 0],
        tf.multiply(
            tf.pow(tf.constant(-1, dtype=tf.float32), real_label[:, 0]),
            softmax_out[:, 0])), tf.constant(2, dtype=tf.float32))
focal_loss = tf.multiply(modulation, cross_entropy)
batch_loss = tf.reduce_mean(focal_loss)
tf.summary.scalar('loss', batch_loss)
inputs_reshape = [
    tf.reshape(inputs[0],
               [-1, REGION_SIZES[0], REGION_SIZES[0], REGION_SIZES[0], 1]),
    tf.reshape(inputs[1],
               [-1, REGION_SIZES[1], REGION_SIZES[1], REGION_SIZES[1], 1]),
    tf.reshape(inputs[2],
               [-1, REGION_SIZES[2], REGION_SIZES[2], REGION_SIZES[2], 1])
]
 if "bn_files" in dir():
     bn_params = [
         np.load(bn_files[0]),
         np.load(bn_files[1]),
         np.load(bn_files[2])
     ]
 else:
     bn_params = [None, None, None]
 outputs0, variables0, _ = tft.volume_bndo_flbias_l5_20(
     inputs_reshape[0],
     dropout_rate=0.0,
     batch_normalization_statistic=False,
     bn_params=bn_params[0])
 outputs1, variables1, _ = tft.volume_bndo_flbias_l5_30(
     inputs_reshape[1],
     dropout_rate=0.0,
     batch_normalization_statistic=False,
     bn_params=bn_params[1])
 outputs2, variables2, _ = tft.volume_bndo_flbias_l6_40(
     inputs_reshape[2],
     dropout_rate=0.0,
     batch_normalization_statistic=False,
     bn_params=bn_params[2])
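# 'vote' fusion: collect each sub-network's softmax prediction
# (presumably combined by majority vote afterwards)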
if FUSION_MODE == 'vote':
    predictions = [
        outputs0['sm_out'], outputs1['sm_out'], outputs2['sm_out']
    ]
volumes_reshape = [
    tf.reshape(volume_inputs[0],
               [-1, REGION_SIZES[0], REGION_SIZES[0], REGION_SIZES[0], 1]),
    tf.reshape(volume_inputs[1],
               [-1, REGION_SIZES[1], REGION_SIZES[1], REGION_SIZES[1], 1]),
    tf.reshape(volume_inputs[2],
               [-1, REGION_SIZES[2], REGION_SIZES[2], REGION_SIZES[2], 1])
]
real_label = tf.placeholder(tf.float32, [None, 2])
#r_bn1, b_bn1, w_conv1, w_conv2, out_conv1, out_bn1, hidden_conv1, hidden_conv2, hidden_conv3, out_fc1, out_fc2, softmax_out = tft.volume_bnnet2_l6_56(volume_reshape)
if "bn_files" in dir():
    bn_params = [
        np.load(bn_files[0]),
        np.load(bn_files[1]),
        np.load(bn_files[2])
    ]
else:
    bn_params = [None, None, None]
net_outs0, variables0, _ = tft.volume_bndo_flbias_l5_20(
    volumes_reshape[0],
    False,
    dropout_rate=0.0,
    batch_normalization_statistic=False,
    bn_params=bn_params[0])
net_outs1, variables1, _ = tft.volume_bndo_flbias_l5_30(
    volumes_reshape[1],
    False,
    dropout_rate=0.0,
    batch_normalization_statistic=False,
    bn_params=bn_params[1])
net_outs2, variables2, _ = tft.volume_bndo_flbias_l6_40(
    volumes_reshape[2],
    False,
    dropout_rate=0.0,
    batch_normalization_statistic=False,
    bn_params=bn_params[2])
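# 'late' fusion: presumably the three networks' scores or features are
# merged after their final layers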
if FUSION_MODE == 'late':
Example 7
volumes_reshape = [
    tf.reshape(volume_inputs[0],
               [-1, REGION_SIZES[0], REGION_SIZES[0], REGION_SIZES[0], 1]),
    tf.reshape(volume_inputs[1],
               [-1, REGION_SIZES[1], REGION_SIZES[1], REGION_SIZES[1], 1]),
    tf.reshape(volume_inputs[2],
               [-1, REGION_SIZES[2], REGION_SIZES[2], REGION_SIZES[2], 1])
]
real_label = tf.placeholder(tf.float32, [None, 2])
#bn_params = np.load(net_init_path + "/batch_normalization_statistic.npy")
positive_confidence = aug_proportion / float(aug_proportion + np_proportion)
if "bn_files" in dir():
    bn_params = [
        np.load(bn_files[0]),
        np.load(bn_files[1]),
        np.load(bn_files[2])
    ]
else:
    bn_params = [None, None, None]
net_outs0, variables0, _ = tft.volume_bndo_flbias_l5_20(
    volumes_reshape[0],
    False,
    positive_confidence,
    dropout_rate=0.0,
    batch_normalization_statistic=False,
    bn_params=bn_params[0])
net_outs1, variables1, _ = tft.volume_bndo_flbias_l5_30(
    volumes_reshape[1],
    False,
    positive_confidence,
    dropout_rate=0.0,
    batch_normalization_statistic=False,
    bn_params=bn_params[1])
net_outs2, variables2, _ = tft.volume_bndo_flbias_l6_40(
    volumes_reshape[2],
    False,
    positive_confidence,
    dropout_rate=0.0,
    batch_normalization_statistic=False,
    bn_params=bn_params[2])
Example 8
if positive_train_num == 0:
    print("no positive training file found")
    exit()
positive_train_indices = list(range(positive_train_num))
positive_val_indices = list(range(positive_val_num))
#random.shuffle(positive_indices)
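# give every negative sample the same large initial importance
# (presumably updated later for hard-negative weighting)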
negative_importances = 1000 * np.ones(shape=[num_negative], dtype=float)

# network construction
volume_input = tf.placeholder(tf.float32,
                              [None, REGION_SIZE, REGION_SIZE, REGION_SIZE])
volume_reshape = tf.reshape(volume_input,
                            [-1, REGION_SIZE, REGION_SIZE, REGION_SIZE, 1])
real_label = tf.placeholder(tf.float32, [None, 2])
#r_bn1, b_bn1, w_conv1, w_conv2, out_conv1, out_bn1, hidden_conv1, hidden_conv2, hidden_conv3, out_fc1, out_fc2, softmax_out = tft.volume_bnnet2_l6_56(volume_reshape)
out_fc2, softmax_out = tft.volume_net3_l6_56(volume_reshape)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=out_fc2,
                                                        labels=real_label)
#focal_loss = tf.scalar_mul(tf.pow(softmax_out[1], tf.constant(2)), cross_entropy)
batch_loss = tf.reduce_mean(cross_entropy)
tf.summary.scalar('loss', batch_loss)
correct_prediction = tf.equal(tf.argmax(softmax_out, 1),
                              tf.argmax(real_label, 1))
batch_accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('acc', batch_accuracy)
merged = tf.summary.merge_all()
'''
trains = []
epochs = []
learning_rate = INITIAL_LEARNING_RATE
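# presumably one training op per learning-rate stage, with the rate
# decayed every DECAY_EPOCH epochs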
for ti in range(0, NUM_EPOCH, DECAY_EPOCH):