Example no. 1
dataset = geometric_3d_dataset.Geometric3DDataset(patch_size=patch_size,
                                                  task=geometric_3d_dataset.Geometric3DDataset.CLASSIFICATION_TASK,
                                                  centered=True)

for e in range(nb_epoch):

    train_iterator = dataset.iterator(batch_size=batch_size,
                                      num_batches=nb_train_batches)

    for b in range(nb_train_batches):
        X_batch, Y_batch = train_iterator.next()
        loss = model.train(X_batch, Y_batch)
        print 'loss: ' + str(loss)

    test_iterator = dataset.iterator(batch_size=batch_size,
                                     num_batches=nb_test_batches)

    for b in range(nb_test_batches):
        X_batch, Y_batch = test_iterator.next()
        error = model.test(X_batch, Y_batch)
        print 'error: ' + str(error)

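The model.train and model.test calls above come from a very early Keras API; later releases renamed them to train_on_batch and test_on_batch. A minimal Python 3 sketch of the same loop against the newer names, assuming model, dataset and the batch-count variables are defined as in the snippet:

# Hedged modernization: train_on_batch/test_on_batch replace the old
# model.train/model.test; dataset.iterator is assumed to behave as above.
for e in range(nb_epoch):
    train_iterator = dataset.iterator(batch_size=batch_size,
                                      num_batches=nb_train_batches)
    for b in range(nb_train_batches):
        X_batch, Y_batch = next(train_iterator)
        loss = model.train_on_batch(X_batch, Y_batch)
        print('loss: ' + str(loss))

    test_iterator = dataset.iterator(batch_size=batch_size,
                                     num_batches=nb_test_batches)
    for b in range(nb_test_batches):
        X_batch, Y_batch = next(test_iterator)
        error = model.test_on_batch(X_batch, Y_batch)
        print('error: ' + str(error))
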
Example no. 2
          kernel_initializer=rnd_normal_init,
          bias_initializer=rnd_normal_init))
model.add(Dropout(0.5))
model.add(
    Dense(30,
          activation='relu',
          kernel_initializer=rnd_normal_init,
          bias_initializer=rnd_normal_init))
model.add(Dropout(0.5))
model.add(
    Dense(num_classes,
          activation='softmax',
          kernel_initializer=rnd_normal_init,
          bias_initializer=rnd_normal_init))

model.summary()

model.compile(
    loss='categorical_crossentropy',  # alternative: sum_squared_error
    optimizer=keras.optimizers.Adam(lr=0.1),
    metrics=['categorical_accuracy'])

history = model.fit(x_train,
                    y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=2,
                    validation_data=(x_test_official, y_test_official))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
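
The layers above use a rnd_normal_init initializer that is defined outside this excerpt. A plausible definition (an assumption, not shown in the source) using the standard Keras 2 initializer API:

# Assumed definition of the initializer referenced above; the mean and
# stddev values are illustrative, not taken from the source.
from keras.initializers import RandomNormal
rnd_normal_init = RandomNormal(mean=0.0, stddev=0.05)
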
Example no. 3
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=20,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False,  # randomly flip images
    )

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    for e in range(nb_epoch):
        print("-" * 40)
        print("Epoch", e)
        print("-" * 40)
        print("Training...")
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss = model.train(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = model.test(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
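
Note that this per-epoch loop relies on very old Keras behaviour, where datagen.flow stopped after one pass over the data; in any recent Keras the generator cycles indefinitely and the loop above would never terminate. A bounded sketch for the modern generator, assuming datagen, model, X_train, Y_train and batch_size from the snippet:

import numpy as np

# Cap the loop at one pass over the training set, since the modern
# ImageDataGenerator.flow yields batches forever.
steps = int(np.ceil(X_train.shape[0] / float(batch_size)))
flow = datagen.flow(X_train, Y_train, batch_size=batch_size)
for step in range(steps):
    X_batch, Y_batch = next(flow)
    loss = model.train_on_batch(X_batch, Y_batch)
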
Example no. 4
model.add(Dropout(0.3))

model.add(Dense(50, 1, init='uniform'))
# sgd = SGD(lr=0.001, decay=1e-6, momentum=0.5, nesterov=True)


rmsprop = RMSprop(lr=0.0001, rho=0.5, epsilon=1e-6)
model.compile(loss='mean_absolute_error', optimizer=rmsprop, class_mode='binary')



# In[25]:



model.fit(a_train, b_train, nb_epoch=500, show_accuracy=True)
score = model.evaluate(a_test, b_test)



# In[26]:

loss, accuracy = model.evaluate(a_test, b_test, show_accuracy=True)
print('loss:', loss)
print('accuracy:', accuracy)
temp_pauaw = 0
# In[ ]:



class MelanomaModel:

    def __init__(self, nb_train_batches, batch_size, is3D):

        self.model = None
        self.train_data_set = None
        self.test_data_set = None
        self.valid_data_set = None
        self.is3D = is3D

        # compute the number of mini-batches for training, validation and testing
        self.nb_train_batches = nb_train_batches
        self.batch_size = batch_size

    def create_model(self):
        self.model = Sequential()
        self.model.add(Convolution3D(16, stack_size=1, nb_row=11, nb_col=11, nb_depth=6, border_mode='valid'))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling3D(poolsize=(3, 3, 1)))
        self.model.add(Convolution3D(32, stack_size=16, nb_row=5, nb_col=5, nb_depth=1, border_mode='valid' ))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling3D(poolsize=(3, 3, 1)))
        self.model.add(Convolution3D(64, stack_size=32, nb_row=3, nb_col=3, nb_depth=1, border_mode='valid' ))
        self.model.add(MaxPooling3D(poolsize=(3, 3, 1)))
        self.model.add(Flatten3D())
        self.model.add(Dense(4096, 1024, init='normal'))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(1024, 512, init='normal'))
        self.model.add(Activation('relu'))
        self.model.add(Dense(512, 2, init='normal'))

        # train the model using RMSprop
        rmsprop = RMSprop(rho=0.9, epsilon=1e-3, lr=0.001)
        self.model.compile(loss='mean_squared_error', optimizer=rmsprop)

    def load_model(self, model_file_path):
        with open(model_file_path, 'rb') as model_file:
            self.model = cPickle.load(model_file)

    def load_melanoma_dataset(self, data_dir, training_perc):
        # Preparing melanoma dataset
        # data directory folder path
        file_names = ([data_dir + filename for filename in os.listdir(data_dir) if ".h5" in filename])
        random.shuffle(file_names)

        train_file_names = file_names[0:int(training_perc*len(file_names))]
        test_file_names = file_names[int(training_perc*len(file_names)):]

        if self.is3D:
            self.train_data_set = MelanomaDataset3D(data_dir, examples=train_file_names)
            self.test_data_set = MelanomaDataset3D(data_dir, examples=test_file_names)
        else:
            self.train_data_set = MelanomaDataset2D(data_dir, examples=train_file_names)
            self.test_data_set = MelanomaDataset2D(data_dir, examples=test_file_names)

    # storing and printing average error over all the mini-batches in an epoch
    def train_model(self, nb_epoch, model_starting_id, model_snapshot_freq, stats_snapshot_freq):
        losses = []
        errors = []

        last_error = float("inf")

        for e in range(nb_epoch):

            print " Performing Epoch no : " + str(e)+".......",

            train_iterator = self.train_data_set.iterator(batch_size=self.batch_size,
                                                          num_batches=self.nb_train_batches,
                                                          mode='even_shuffled_sequential')

            for b in range(self.nb_train_batches):
                X_batch, Y_batch = train_iterator.next()
                loss = self.model.train(X_batch, Y_batch)
                sys.stdout.write("Loss: %f   \r" % loss)
                sys.stdout.flush()
                losses.append(loss)

            test_iterator = self.test_data_set.iterator(batch_size=self.batch_size,
                                                        mode='sequential')

            errors1 = []
            while test_iterator.has_next():
                X_batch, Y_batch, batch_files = test_iterator.next()
                error = self.model.test(X_batch, Y_batch)
                errors1.append(error)

            mean_error = np.mean(errors1)
            errors.append(mean_error)
            print "error:   "+ str(mean_error)

            if mean_error < last_error:
                last_error = mean_error
                pickle.dump(self.model, open("best_model_"+str(e)+".pkl","wc"))

            if e % stats_snapshot_freq == 0 and e > 0:
                pickle.dump(losses, open("loss.pkl", "wb"))
                pickle.dump(errors, open("error.pkl", "wb"))

            if e % model_snapshot_freq == 0 and e > 0:
                pickle.dump(self.model, open("trained_model.pkl", "wb"))
                model_starting_id += 1
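
The snapshotting above relies on the interpreter to close the pickle files. A small sketch of a tidier pattern using a context manager; in any recent Keras, self.model.save('best_model.h5') would be the usual replacement for pickling the model object:

# Sketch: write each snapshot through a context manager so the file is
# flushed and closed deterministically; the file name follows the code above.
with open("best_model_" + str(e) + ".pkl", "wb") as f:
    pickle.dump(self.model, f)
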
Example no. 6
model.add(Flatten(nb_filter * 14 * 14 * 14))
model.add(Dense(nb_filter * 14 * 14 * 14, nb_classes, init='normal'))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

dataset = geometric_3d_dataset.Geometric3DDataset(
    patch_size=patch_size,
    task=geometric_3d_dataset.Geometric3DDataset.CLASSIFICATION_TASK,
    centered=True)

for e in range(nb_epoch):

    train_iterator = dataset.iterator(batch_size=batch_size,
                                      num_batches=nb_train_batches)

    for b in range(nb_train_batches):
        X_batch, Y_batch = train_iterator.next()
        loss = model.train(X_batch, Y_batch)
        print 'loss: ' + str(loss)

    test_iterator = dataset.iterator(batch_size=batch_size,
                                     num_batches=nb_test_batches)

    for b in range(nb_test_batches):
        X_batch, Y_batch = test_iterator.next()
        error = model.test(X_batch, Y_batch)
        print 'error: ' + str(error)
Example no. 7
model.add(Dense(input_dimesion, 250, init='uniform'))
model.add(Activation('sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(250, 50, init='uniform'))
model.add(Dense(50, 1, init='uniform'))

'''
define the optimization function and compile it:
'''

#sgd = SGD(lr=0.001, decay=1e-6, momentum=0.5, nesterov=True)
rmsprop = RMSprop(lr=0.0001, rho=0.5, epsilon=1e-6)
model.compile(loss='mean_absolute_error', optimizer=rmsprop, class_mode='binary')

'''
train the model
'''
print model.fit(a_train, b_train, nb_epoch=500, show_accuracy=True, verbose=1)


'''
test the model
'''
print model.evaluate(a_test, b_test, show_accuracy=True)

Example no. 8
        samplewise_center=False, # set each sample mean to 0
        featurewise_std_normalization=True, # divide inputs by std of the dataset
        samplewise_std_normalization=False, # divide each input by its std
        zca_whitening=False, # apply ZCA whitening
        rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
        horizontal_flip=True, # randomly flip images
        vertical_flip=False) # randomly flip images

    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(X_train)

    for e in range(nb_epoch):
        print('-'*40)
        print('Epoch', e)
        print('-'*40)
        print("Training...")
        # batch train with realtime data augmentation
        progbar = generic_utils.Progbar(X_train.shape[0])
        for X_batch, Y_batch in datagen.flow(X_train, Y_train):
            loss = model.train(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("train loss", loss)])

        print("Testing...")
        # test time!
        progbar = generic_utils.Progbar(X_test.shape[0])
        for X_batch, Y_batch in datagen.flow(X_test, Y_test):
            score = model.test(X_batch, Y_batch)
            progbar.add(X_batch.shape[0], values=[("test loss", score)])
Example no. 9
model = Sequential()
# Input shape: 2D tensor with shape (nb_samples, input_dim);
# nb_samples is presumably the samples dimension.
model.add(Dense(input_dimesion, 250, init='uniform'))
model.add(Activation('sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(250, 50, init='uniform'))
model.add(Dropout(0.3))

model.add(Dense(50, 1, init='uniform'))
# sgd = SGD(lr=0.001, decay=1e-6, momentum=0.5, nesterov=True)

rmsprop = RMSprop(lr=0.0001, rho=0.5, epsilon=1e-6)
model.compile(loss='mean_absolute_error',
              optimizer=rmsprop,
              class_mode='binary')

# In[25]:

model.fit(a_train, b_train, nb_epoch=500, show_accuracy=True)
score = model.evaluate(a_test, b_test)

# In[26]:

loss, accuracy = model.evaluate(a_test, b_test, show_accuracy=True)
print('loss:', loss)
print('accuracy:', accuracy)
temp_pauaw = 0
# In[ ]:
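
For reference, a hedged Keras 2 rewrite of this binary pipeline. The data names (a_train, b_train, a_test, b_test, input_dimesion) are kept from the snippet; the sigmoid output and binary_crossentropy loss are assumptions standing in for the removed class_mode='binary' and show_accuracy options:

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop

model = Sequential()
model.add(Dense(250, activation='sigmoid', kernel_initializer='random_uniform',
                input_dim=input_dimesion))
model.add(Dropout(0.5))
model.add(Dense(50, kernel_initializer='random_uniform'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid', kernel_initializer='random_uniform'))

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=0.0001, rho=0.5, epsilon=1e-6),
              metrics=['accuracy'])
model.fit(a_train, b_train, epochs=500)
loss, accuracy = model.evaluate(a_test, b_test, verbose=0)
print('loss:', loss)
print('accuracy:', accuracy)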