Example #1
import pickle

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D

nb_classes = 3  # classes 0, 1, 2, inferred from the distribution prints below

# input image dimensions
img_rows, img_cols = 100, 100
# the mitosis images are RGB
img_channels = 3

# the data, shuffled and split between train and test sets
image_list = create_image_list('/data/50_50_100_40', sample_size=[-1,-1,0,-1,-1], use_mr=True)

with open('image_list.pkl', 'wb') as f:
    pickle.dump(image_list, f)
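
# The cached list can be restored in a later run with the matching binary
# read mode:
#     with open('image_list.pkl', 'rb') as f:
#         image_list = pickle.load(f)

# NOTE: get_counts (used below) is not part of this snippet.  The sketch that
# follows only illustrates what such a helper could look like; it assumes each
# image_list entry is a pair whose second element is the integer class label,
# which is an assumption about the data layout rather than something the
# original code states.
from collections import Counter

def get_counts(image_list, nb_classes=None):
    """Count how many images fall into each class label."""
    counts = Counter(label for _, label in image_list)
    if nb_classes is None:
        return dict(counts)
    return [counts.get(c, 0) for c in range(nb_classes)]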

# Create testset data for cross-val
num_images = len(image_list)
test_size = int(0.1 * num_images)
print("Train size: ", num_images-test_size)
print("Test size: ", test_size)
print("Training Distribution: ", get_counts(image_list[0:-test_size], nb_classes))

feature_layers = [
    Convolution2D(256, 6, 6, border_mode='same',
                  input_shape=(img_channels, img_rows, img_cols)),
    Activation('relu'),
    Convolution2D(256, 6, 6),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    # Dropout(0.1),

    Convolution2D(128, 3, 3, border_mode='same'),
    Activation('relu'),
    Convolution2D(128, 3, 3),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.1)
]
classification_layers = [
    Flatten(),
    Dense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(nb_classes),
    Activation('softmax'),
]
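
# NOTE: train_model, create_image_list and create_image_clusters come from the
# surrounding project and are not shown here.  The sketch below is only a guess
# at the transfer-learning pattern train_model appears to follow: build one
# Sequential model from the shared feature_layers plus the classification
# layers, optionally warm-start it from the previous cluster's weights, train,
# and hand the new weights on.  The data layout (image array, label) and the
# compile/fit settings are assumptions, not the original implementation.
import numpy as np
from keras.utils import np_utils

def train_model(feature_layers, classification_layers, image_list, nb_epoch,
                nb_classes, img_rows, img_cols, weights=None):
    # assumed layout: (image array of shape (img_channels, img_rows, img_cols),
    #                  integer class label)
    X = np.array([img for img, _ in image_list], dtype='float32') / 255.0
    Y = np_utils.to_categorical([label for _, label in image_list], nb_classes)

    model = Sequential(feature_layers + classification_layers)
    if weights is not None:
        # continue from the weights learned on the previous cluster
        model.set_weights(weights)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X, Y, batch_size=32, nb_epoch=nb_epoch, validation_split=0.1)
    return model, model.get_weights()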

# Train on cluster0
image_list0 = create_image_clusters(image_list, image_names, clusters, 0)
print("Distribution of classes (0,1,2): ", get_counts(image_list0))
model0, weights0 = train_model(feature_layers, classification_layers,
                               image_list0, nb_epoch, nb_classes, img_rows,
                               img_cols)

# Train on cluster1
image_list1 = create_image_clusters(image_list, image_names, clusters, 1)
print("Distribution of classes (0,1,2): ", get_counts(image_list1))
model1, weights1 = train_model(feature_layers, classification_layers,
                               image_list1, nb_epoch, nb_classes, img_rows,
                               img_cols, weights0)

# Train on cluster2
image_list2 = create_image_clusters(image_list, image_names, clusters, 2)
print("Distribution of classes (0,1,2): ", get_counts(image_list2))
model_final, weights2 = train_model(feature_layers, classification_layers,
                                    image_list2, nb_epoch, nb_classes,
                                    img_rows, img_cols, weights1)
Example #3
import pickle

from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D

nb_classes = 3  # classes 0, 1, 2, inferred from the distribution prints below

# input image dimensions; the mitosis images are RGB
img_rows, img_cols = 100, 100
img_channels = 3

# the data, shuffled and split between train and test sets
image_list = create_image_list('/data/50_50_100_40',
                               sample_size=[-1, -1, 0, -1, -1],
                               use_mr=True)

with open('image_list.pkl', 'wb') as f:
    pickle.dump(image_list, f)

# Create testset data for cross-val
num_images = len(image_list)
test_size = int(0.1 * num_images)
print("Train size: ", num_images - test_size)
print("Test size: ", test_size)
print("Training Distribution: ",
      get_counts(image_list[0:-test_size], nb_classes))

feature_layers = [
    Convolution2D(256,
                  6,
                  6,
                  border_mode='same',
                  input_shape=(img_channels, img_rows, img_cols)),
    Activation('relu'),
    Convolution2D(256, 6, 6),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    # Dropout(0.1),
    Convolution2D(128, 3, 3, border_mode='same'),
    Activation('relu'),
    Convolution2D(128, 3, 3),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.1)
]
classification_layers = [
    Flatten(),
    Dense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(nb_classes),
    Activation('softmax'),
]


# Train on cluster0
image_list0 = create_image_clusters(image_list, image_names, clusters, 0)
print("Distribution of classes (0,1,2): ", get_counts(image_list0))
model0, weights0 = train_model(feature_layers, classification_layers,
                               image_list0, nb_epoch, nb_classes, img_rows,
                               img_cols)

# Train on cluster1
image_list1 = create_image_clusters(image_list, image_names, clusters, 1)
print("Distribution of classes (0,1,2): ", get_counts(image_list1))
model1, weights1 = train_model(feature_layers, classification_layers,
                               image_list1, nb_epoch, nb_classes, img_rows,
                               img_cols, weights0)

# Train on cluster2
image_list2 = create_image_clusters(image_list, image_names, clusters, 2)
print("Distribution of classes (0,1,2): ", get_counts(image_list2))
model_final, weights2 = train_model(feature_layers, classification_layers,
                                    image_list2, nb_epoch, nb_classes,
                                    img_rows, img_cols, weights1)

json_string = model_final.to_json()
with open('cluster_model_architecture.json', 'w') as f:
    f.write(json_string)
model_final.save_weights('cluster_model_weights.h5')
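
# To reuse the saved model later, the architecture and weights can be reloaded
# with the standard Keras calls below (the compile settings are an assumption,
# since the original compile step is not shown in these examples):
from keras.models import model_from_json

with open('cluster_model_architecture.json') as f:
    loaded_model = model_from_json(f.read())
loaded_model.load_weights('cluster_model_weights.h5')
loaded_model.compile(loss='categorical_crossentropy',
                     optimizer='adadelta',
                     metrics=['accuracy'])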