コード例 #1
0
def run_model():
    """Train model4 (U-Net variant: 'b5', relu, dilated, pos_weight=10) on the
    ultrasound nerve segmentation training sample and save checkpoints.

    Side effects: creates the per-tag model directory, writes a full-model
    checkpoint every 25 epochs, and saves the final model as model_final.h5.
    Relies on module-level globals: base_dir, load_data, model4, os, tf.
    """
    data_dir = base_dir + '/ultrasound-nerve-segmentation/train_data_sample'
    model_dir = base_dir + '/models'

    IMG_HEIGHT = 256
    IMG_WIDTH = 256
    NUM_CHANNELS = 3
    OUTPUT_CHANNELS = 1

    train_data = load_data(data_dir, [IMG_WIDTH, IMG_HEIGHT])
    print(train_data)

    # Number of samples in the sample directory (full dataset is 5635).
    N = 500

    tag = 'unet_model_b5_relu_dil_w10'
    model_dir += '/{0}'.format(tag)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    BATCH_SIZE = 32
    BUFFER_SIZE = 1000
    STEPS_PER_EPOCH = N // BATCH_SIZE

    train_dataset = train_data.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    EPOCHS = 250
    VAL_SUBSPLITS = 5
    VALIDATION_STEPS = N // BATCH_SIZE // VAL_SUBSPLITS

    model = model4(tag, (IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS, OUTPUT_CHANNELS),
                   'relu', 10)

    # Full-model checkpoint (weights + architecture) every 25 epochs.
    model_save_callback = tf.keras.callbacks.ModelCheckpoint(
        model_dir + '/model_epoch_{epoch:04d}.h5',
        save_weights_only=False,
        period=25)

    # NOTE(review): validation_data reuses the training dataset — there is no
    # held-out split here, so val metrics do not measure generalization.
    model_history = model.fit(train_dataset,
                              epochs=EPOCHS,
                              steps_per_epoch=STEPS_PER_EPOCH,
                              validation_steps=VALIDATION_STEPS,
                              validation_data=train_dataset,
                              callbacks=[model_save_callback])

    model.save(model_dir + '/model_final.h5')
コード例 #2
0
def predict(tag, data_tag, n):
    """Evaluate a trained segmentation model at each saved checkpoint epoch.

    For each checkpoint (epochs 275..500 step 25) this predicts masks for the
    first `n` images, pickles the true/predicted masks, and writes a CSV with
    per-image pixel counts, confusion-matrix entries, dice and IoU, plus a
    final 'total' aggregate row.

    Args:
        tag: model tag selecting the model directory (e.g. 'unet_model1_elu2').
        data_tag: 'train' selects the training sample; anything else the test
            sample.
        n: number of images to evaluate per checkpoint.

    Relies on module-level globals: base_dir, load_data,
    weighted_binary_crossentropy, dice_coefficient, tversky_loss,
    os, sys, np, tf, pickle.
    """
    model_dir = base_dir + '/models'
    model_dir += '/{0}'.format(tag)

    if data_tag == 'train':
        data_dir = base_dir + '/ultrasound-nerve-segmentation/train_data_sample'
    else:
        data_dir = base_dir + '/ultrasound-nerve-segmentation/test_data_sample'

    predictions_dir = base_dir + '/predictions_epochs/{0}/{1}'.format(tag, data_tag)
    prediction_results_dir = base_dir + '/prediction_results_epochs/{0}/{1}'.format(tag, data_tag)
    prediction_plots_dir = base_dir + '/plots/predictions_epochs/{0}/{1}'.format(tag, data_tag)

    for out_dir in (predictions_dir, prediction_results_dir, prediction_plots_dir):
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    # Custom objects required to deserialize the saved .h5 models.
    loss = weighted_binary_crossentropy(pos_weight=10)
    dice_loss = dice_coefficient()
    tvk_loss = tversky_loss(0.9)

    IMG_HEIGHT = 256
    IMG_WIDTH = 256

    # 'b5' models were trained at 256x256; refuse to run at any other size.
    if 'b5' in tag and IMG_HEIGHT != 256:
        print('SIZE MISMATCH')
        sys.exit(1)  # was exit(0); a size mismatch is an error condition

    NUM_CHANNELS = 3
    OUTPUT_CHANNELS = 1

    data = load_data(data_dir, [IMG_WIDTH, IMG_HEIGHT])

    # The model outputs logits; convert to probabilities before thresholding.
    sigmoid = lambda x: 1 / (1 + np.exp(-1 * x))

    epochs = np.arange(275, 501, 25)
    prob = 0.5  # probability threshold for labelling a pixel positive

    for epoch in epochs:
        model = tf.keras.models.load_model(
            model_dir + '/model_epoch_{epoch:04d}.h5'.format(epoch=epoch),
            custom_objects={'_weighted_binary_crossentropy': loss,
                            '_dice_coefficient': dice_loss,
                            '_tversky_loss': tvk_loss})

        fp_out = open(prediction_results_dir + '/results_img_{0}.csv'.format(epoch), 'w')
        # Header fixed to match the 9 data columns written per row below
        # (dice_coefficient and iou were previously missing).
        fp_out.write('img,pos_pixels,neg_pixels,tp,fp,tn,fn,dice_coefficient,iou' + '\n')

        y_pos_total = 0
        y_neg_total = 0
        tp_total = 0
        fp_total = 0
        tn_total = 0
        fn_total = 0
        dice_coeff_sum = 0
        iou_sum = 0

        i = 0
        cnt = 0
        for image, mask in data.take(n):
            pred_mask = model.predict(image[tf.newaxis, ...])
            pred_mask = pred_mask[0, :, :, :]
            pred_mask = sigmoid(pred_mask)
            print(np.mean(pred_mask))

            # Persist true/predicted masks for later inspection. File handles
            # are named distinctly so they cannot shadow the false-positive
            # count `fp` computed below.
            with open(predictions_dir + '/true_mask_{0}.pkl'.format(i), 'wb') as mask_file:
                pickle.dump(mask, mask_file)
            with open(predictions_dir + '/pred_mask_{0}.pkl'.format(i), 'wb') as pred_file:
                pickle.dump(pred_mask, pred_file)

            pred_mask_label = (pred_mask > prob).astype(float)

            true_mask_pos_index = (mask == 1.0)
            true_mask_neg_index = (mask != 1.0)
            y_pos = np.sum(true_mask_pos_index)
            y_neg = np.sum(true_mask_neg_index)

            tp = np.sum(pred_mask_label[true_mask_pos_index])
            fp = np.sum(pred_mask_label[true_mask_neg_index])
            tn = y_neg - fp
            fn = y_pos - tp
            # Degenerate case (no positives predicted or present): score 1.0.
            dice_coeff = round((2 * tp) / (2 * tp + fp + fn), 6) if tp + fp + fn != 0 else 1.0
            iou = round(tp / (tp + fp + fn), 6) if tp + fp + fn != 0 else 1.0

            print(y_pos, y_neg)
            print(tp, fp, tn, fn)
            print(dice_coeff, iou)

            fp_out.write(','.join(['img{0}'.format(i), str(y_pos), str(y_neg),
                                   str(tp), str(fp), str(tn), str(fn),
                                   str(dice_coeff), str(iou)]) + '\n')

            y_pos_total += y_pos
            y_neg_total += y_neg
            tp_total += tp
            fp_total += fp
            tn_total += tn
            fn_total += fn
            dice_coeff_sum += dice_coeff
            iou_sum += iou

            cnt += 1

            conf_matrix = np.array([[tn, fp], [fn, tp]]).astype(int)
            print(conf_matrix)

            i += 1

        # Per-image averages vs. dataset-level aggregates (pooled pixels).
        dice_coeff_avg = round(dice_coeff_sum / cnt, 6)
        iou_avg = round(iou_sum / cnt, 6)

        dice_coeff_agg = round((2 * tp_total) / (2 * tp_total + fp_total + fn_total), 6)
        iou_agg = round(tp_total / (tp_total + fp_total + fn_total), 6)

        output_line = 'total,' + ','.join([str(x) for x in [
            y_pos_total, y_neg_total, tp_total, fp_total, tn_total, fn_total,
            dice_coeff_avg, iou_avg, dice_coeff_agg, iou_agg]])

        fp_out.write(output_line + '\n')

        fp_out.close()
コード例 #3
0
def model1():
    """Build and train a 5-level U-Net (16→256 filters, relu activations) on
    the ultrasound nerve segmentation sample with a dice-coefficient loss.

    The final 1x1 convolution has no activation, so the model outputs raw
    logits. Saves weights-only checkpoints every 25 epochs and the full
    final model as model_final.h5.
    Relies on module-level globals: base_dir, load_data, downsampling_block,
    upsampling_block, dice_coefficient, os, tf.
    """
    data_dir = base_dir + '/ultrasound-nerve-segmentation/train_data_sample'
    model_dir = base_dir + '/models'

    IMG_HEIGHT = 128
    IMG_WIDTH = 128
    NUM_CHANNELS = 3

    train_data = load_data(data_dir, [IMG_WIDTH, IMG_HEIGHT])
    print(train_data)

    # Number of samples in the sample directory (full dataset is 5635).
    N = 500

    tag = 'unet_model1_rrelu_dice'
    model_dir += '/{0}'.format(tag)

    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    ### Encoder: four downsampling blocks, doubling filters each level.
    inputs = tf.keras.layers.Input((IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS))

    pooling1, conv1b = downsampling_block(inputs, 16, (3, 3), (2, 2), 'relu',
                                          'down1')
    pooling2, conv2b = downsampling_block(pooling1, 32, (3, 3), (2, 2), 'relu',
                                          'down2')
    pooling3, conv3b = downsampling_block(pooling2, 64, (3, 3), (2, 2), 'relu',
                                          'down3')
    pooling4, conv4b = downsampling_block(pooling3, 128, (3, 3), (2, 2),
                                          'relu', 'down4')

    ### Bottleneck: two 256-filter convolutions.
    conv5a = tf.keras.layers.Conv2D(256, (3, 3),
                                    activation=tf.keras.activations.relu,
                                    kernel_initializer='he_normal',
                                    padding='same')(pooling4)
    conv5b = tf.keras.layers.Conv2D(256, (3, 3),
                                    activation=tf.keras.activations.relu,
                                    kernel_initializer='he_normal',
                                    padding='same')(conv5a)

    ### Decoder: four upsampling blocks with skip connections to the
    ### matching encoder level.
    conv6b = upsampling_block(conv5b, conv4b, 128, (3, 3), (2, 2), 'relu',
                              'up1')
    conv7b = upsampling_block(conv6b, conv3b, 64, (3, 3), (2, 2), 'relu',
                              'up2')
    conv8b = upsampling_block(conv7b, conv2b, 32, (3, 3), (2, 2), 'relu',
                              'up3')
    conv9b = upsampling_block(conv8b, conv1b, 16, (3, 3), (2, 2), 'relu',
                              'up4')

    # One logit channel per pixel; no sigmoid here (loss works on logits).
    outputs = tf.keras.layers.Conv2D(1, (1, 1))(conv9b)

    # Weights-only checkpoint every 25 epochs.
    model_save_callback = tf.keras.callbacks.ModelCheckpoint(
        model_dir + '/model_epoch_{epoch:04d}.h5',
        save_weights_only=True,
        period=25)

    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    model.compile(optimizer='adam',
                  loss=dice_coefficient(),
                  metrics=['accuracy'])
    model.summary()

    BATCH_SIZE = 32
    BUFFER_SIZE = 1000
    STEPS_PER_EPOCH = N // BATCH_SIZE

    train_dataset = train_data.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    EPOCHS = 250
    VAL_SUBSPLITS = 5
    VALIDATION_STEPS = N // BATCH_SIZE // VAL_SUBSPLITS

    # NOTE(review): validation_data reuses the training dataset — there is no
    # held-out split here, so val metrics do not measure generalization.
    model_history = model.fit(train_dataset,
                              epochs=EPOCHS,
                              steps_per_epoch=STEPS_PER_EPOCH,
                              validation_steps=VALIDATION_STEPS,
                              validation_data=train_dataset,
                              callbacks=[model_save_callback])

    model.save(model_dir + '/model_final.h5')
コード例 #4
0
def main():
    """Train a U-Net with a frozen MobileNetV2 encoder and pix2pix upsampling
    decoder on the ultrasound nerve segmentation sample.

    Uses a weighted binary cross-entropy loss (pos_weight=25) on raw logits
    (the final 1x1 convolution has no activation). Saves weights-only
    checkpoints every 25 epochs and the full model at the end.
    Relies on module-level globals: base_dir, load_data,
    weighted_binary_crossentropy, pix2pix, tf.
    """
    data_dir = base_dir + '/ultrasound-nerve-segmentation/train_sample'
    model_dir = base_dir + '/models'

    IMG_HEIGHT = 128
    IMG_WIDTH = 128
    OUTPUT_CHANNELS = 1

    train_data = load_data(data_dir, [IMG_WIDTH, IMG_HEIGHT])

    # Number of samples in the sample directory (full dataset is 5635).
    N = 500
    tag = 'model_simple2_w25'

    # Pretrained MobileNetV2 (no classification head) as the encoder.
    base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3],
                                                   include_top=False)

    # Activations at these layers feed the decoder's skip connections.
    layer_names = [
        'block_1_expand_relu',  # 64x64
        'block_3_expand_relu',  # 32x32
        'block_6_expand_relu',  # 16x16
        'block_13_expand_relu',  # 8x8
        'block_16_project',  # 4x4
    ]

    layers = [base_model.get_layer(name).output for name in layer_names]

    print(layers)

    # Feature-extraction model; kept frozen so only the decoder trains.
    down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)
    print(down_stack)

    down_stack.trainable = False

    # Decoder: pix2pix upsample blocks mirroring the encoder resolutions.
    up_stack = [
        pix2pix.upsample(512, 3),  # 4x4 -> 8x8
        pix2pix.upsample(256, 3),  # 8x8 -> 16x16
        pix2pix.upsample(128, 3),  # 16x16 -> 32x32
        pix2pix.upsample(64, 3),  # 32x32 -> 64x64
    ]

    inputs = tf.keras.layers.Input(shape=[128, 128, 3])
    x = inputs

    # Downsampling through the frozen encoder; deepest features first.
    skips = down_stack(x)
    x = skips[-1]
    skips = reversed(skips[:-1])

    # Upsampling with skip connections concatenated at each level.
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    # 64x64 -> 128x128, then a 1x1 conv producing one logit per pixel.
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS,
                                           3,
                                           strides=2,
                                           padding='same')

    x = last(x)
    outputs = tf.keras.layers.Conv2D(1, (1, 1))(x)

    # Weights-only checkpoint every 25 epochs.
    model_save_callback = tf.keras.callbacks.ModelCheckpoint(
        model_dir + '/model_epoch_{epoch:04d}.h5',
        save_weights_only=True,
        period=25)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    # Positive pixels are rare, so weight them 25x in the BCE loss.
    model.compile(optimizer='adam',
                  loss=weighted_binary_crossentropy(pos_weight=25),
                  metrics=['accuracy'])

    print(model)

    BATCH_SIZE = 32
    BUFFER_SIZE = 1000
    STEPS_PER_EPOCH = N // BATCH_SIZE

    train_dataset = train_data.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    EPOCHS = 250
    VAL_SUBSPLITS = 5
    VALIDATION_STEPS = N // BATCH_SIZE // VAL_SUBSPLITS

    # NOTE(review): validation_data reuses the training dataset — there is no
    # held-out split here, so val metrics do not measure generalization.
    model_history = model.fit(train_dataset,
                              epochs=EPOCHS,
                              steps_per_epoch=STEPS_PER_EPOCH,
                              validation_steps=VALIDATION_STEPS,
                              validation_data=train_dataset,
                              callbacks=[model_save_callback])

    model.save(model_dir + '/{0}.h5'.format(tag))