Example #1
0
dataset = geometric_3d_dataset.Geometric3DDataset(patch_size=patch_size,
                                                  task=geometric_3d_dataset.Geometric3DDataset.CLASSIFICATION_TASK,
                                                  centered=True)

for e in range(nb_epoch):

    train_iterator = dataset.iterator(batch_size=batch_size,
                                      num_batches=nb_test_batches)

    for b in range(nb_train_batches):
        X_batch, Y_batch = train_iterator.next()
        loss = model.train(X_batch, Y_batch)
        print 'loss: ' + str(loss)

    test_iterator = dataset.iterator(batch_size=batch_size,
                                     num_batches=nb_train_batches)

    for b in range(nb_test_batches):
        X_batch, Y_batch = test_iterator.next()
        error = model.test(X_batch, Y_batch)
        print 'error: ' + str(error)








Example #2
0
# Final layers of the binary-classification MLP: dropout then a single
# output unit.
model.add(Dropout(0.3))

model.add(Dense(50, 1, init='uniform'))

# NOTE(review): SGD was superseded by the RMSprop optimizer below and was
# never passed to compile(); commented out for consistency with the other
# copy of this script in this file (which also keeps it as a comment).
#sgd = SGD(lr=0.001, decay=1e-6, momentum=0.5, nesterov=True)

rmsprop = RMSprop(lr=0.0001, rho=0.5, epsilon=1e-6)
model.compile(loss='mean_absolute_error', optimizer=rmsprop, class_mode='binary')


# In[25]:


model.fit(a_train, b_train, nb_epoch=500, show_accuracy=True)
score = model.evaluate(a_test, b_test)


# In[26]:

# model.test(..., accuracy=True) returns a (loss, accuracy) pair.
# (Dead debug binding `temp_pauaw = 0` removed.)
loss, accuracy = model.test(a_test, b_test, accuracy=True)
print ('loss:', loss)
print ('accuracy:', accuracy)
# In[ ]:



class MelanomaModel:
    """CNN wrapper for melanoma classification on 2-D or 3-D patch data.

    Handles model construction, dataset loading/splitting, epoch-based
    training with periodic snapshots, and (de)serialisation of the best
    model seen so far.
    """

    def __init__(self, nb_train_batches, batch_size, is3D):
        """Store training hyper-parameters; no model is built yet.

        nb_train_batches -- mini-batches drawn per training epoch
        batch_size       -- examples per mini-batch
        is3D             -- True for the 3-D dataset variant, False for 2-D
        """
        self.model = None
        self.train_data_set = None
        self.test_data_set = None
        self.valid_data_set = None
        self.is3D = is3D

        # number of mini-batches used for training, and their size
        self.nb_train_batches = nb_train_batches
        self.batch_size = batch_size

    def create_model(self):
        """Assemble the 3-D convolutional network and compile it."""
        self.model = Sequential()
        self.model.add(Convolution3D(16, stack_size=1, nb_row=11, nb_col=11, nb_depth=6, border_mode='valid'))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling3D(poolsize=(3, 3, 1)))
        self.model.add(Convolution3D(32, stack_size=16, nb_row=5, nb_col=5, nb_depth=1, border_mode='valid'))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling3D(poolsize=(3, 3, 1)))
        self.model.add(Convolution3D(64, stack_size=32, nb_row=3, nb_col=3, nb_depth=1, border_mode='valid'))
        self.model.add(MaxPooling3D(poolsize=(3, 3, 1)))
        self.model.add(Flatten3D())
        self.model.add(Dense(4096, 1024, init='normal'))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1024, 512, init='normal'))
        self.model.add(Activation('relu'))
        self.model.add(Dense(512, 2, init='normal'))

        # NOTE(review): despite the local name `sgd`, the optimizer actually
        # used is RMSprop (kept as-is to preserve behaviour).
        sgd = RMSprop(rho=0.9, epsilon=1e-3, lr=0.001)
        self.model.compile(loss='mean_squared_error', optimizer=sgd)

    def load_model(self, model_file_path):
        """Load a previously pickled model from *model_file_path*.

        BUG FIX: the file is now opened in binary mode (pickled data is
        binary) and is closed afterwards -- the original leaked a text-mode
        handle.
        """
        with open(model_file_path, 'rb') as model_file:
            self.model = cPickle.load(model_file)

    def load_melanoma_dataset(self, data_dir, training_perc):
        """Split the .h5 files under *data_dir* into train/test datasets.

        training_perc -- fraction (0..1) of shuffled files used for training
        """
        file_names = [data_dir + filename
                      for filename in os.listdir(data_dir) if ".h5" in filename]
        random.shuffle(file_names)

        # a single split point keeps the two slices consistent
        split = int(training_perc * len(file_names))
        train_file_names = file_names[:split]
        test_file_names = file_names[split:]

        if self.is3D:
            self.train_data_set = MelanomaDataset3D(data_dir, examples=train_file_names)
            self.test_data_set = MelanomaDataset3D(data_dir, examples=test_file_names)
        else:
            self.train_data_set = MelanomaDataset2D(data_dir, examples=train_file_names)
            self.test_data_set = MelanomaDataset2D(data_dir, examples=test_file_names)

    def train_model(self, nb_epoch, model_starting_id, model_snapshot_freq, stats_snapshot_freq):
        """Train for *nb_epoch* epochs, evaluating on the test set each epoch.

        Pickles the model whenever the mean test error improves, dumps
        loss/error statistics every *stats_snapshot_freq* epochs and a model
        snapshot every *model_snapshot_freq* epochs.

        BUG FIX: every pickle was written with open(..., "wc") -- "wc" is
        not a valid mode and raises ValueError on first use.  Pickles are
        now written with "wb" through `with` blocks so handles are closed.
        """
        losses = []
        errors = []

        last_error = float("inf")

        for e in range(nb_epoch):

            sys.stdout.write(" Performing Epoch no : " + str(e) + ".......")

            train_iterator = self.train_data_set.iterator(batch_size=self.batch_size,
                                                          num_batches=self.nb_train_batches,
                                                          mode='even_shuffled_sequential')

            for b in range(self.nb_train_batches):
                X_batch, Y_batch = train_iterator.next()
                loss = self.model.train(X_batch, Y_batch)
                # overwrite the same console line with the running loss
                sys.stdout.write("Loss: %f%%   \r" % (loss))
                sys.stdout.flush()
                losses.append(loss)

            # evaluate on the full test set, sequentially
            test_iterator = self.test_data_set.iterator(batch_size=self.batch_size,
                                                        mode='sequential')

            epoch_errors = []
            while test_iterator.has_next():
                X_batch, Y_batch, batch_files = test_iterator.next()
                error = self.model.test(X_batch, Y_batch)
                epoch_errors.append(error)

            mean_error = np.mean(epoch_errors)
            errors.append(mean_error)
            sys.stdout.write("error:   " + str(mean_error) + "\n")

            # keep a snapshot of the best model seen so far
            if mean_error < last_error:
                last_error = mean_error
                with open("best_model_" + str(e) + ".pkl", "wb") as f:
                    pickle.dump(self.model, f)

            if e % stats_snapshot_freq == 0 and e > 0:
                with open("loss.pkl", "wb") as f:
                    pickle.dump(losses, f)
                with open("error.pkl", "wb") as f:
                    pickle.dump(errors, f)

            if e % model_snapshot_freq == 0 and e > 0:
                with open("trained_model.pkl", "wb") as f:
                    pickle.dump(self.model, f)
                model_starting_id += 1
Example #4
0
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False,  # but never vertically
    )

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    # NOTE(review): these loops assume datagen.flow() stops after one pass
    # over the data -- confirm with the generator implementation, otherwise
    # they never terminate.
    for e in range(nb_epoch):
        print("-" * 40)
        print("Epoch", e)
        print("-" * 40)
        print("Training...")
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss = model.train(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # evaluate with the same augmentation pipeline
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = model.test(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
model.add(Dense(input_dimesion, 250, init='uniform'))
model.add(Activation('sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(250, 50, init='uniform'))
model.add(Dense(50, 1, init='uniform'))

'''
define the optimization function and compile it:
'''

#sgd = SGD(lr=0.001, decay=1e-6, momentum=0.5, nesterov=True)
rmsprop = RMSprop(lr=0.0001, rho=0.5, epsilon=1e-6)
model.compile(loss='mean_absolute_error', optimizer=rmsprop,class_mode='binary')

'''
train the mocdel
'''
print model.fit(a_train, b_train, nb_epoch=500,show_accuracy=True,verbose=1)


'''
test the model
'''
print model.test(a_test, b_test, accuracy=True)