Example #1
    def loadDataset(self):
        print('load MNIST dataset')
        dataset = data.load_train_data()
        dataset['data'] = dataset['data'].astype(np.float32)
        dataset['data'] /= 255    # finally we have the data as a NumPy array, scaled to [0, 1]
        dataset['target'] = dataset['target'].astype(np.int32)
        return dataset
Example #2
def load_data(data_path):
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train, imgs_patient_train = load_train_data(data_path)

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    
    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data(data_path)
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std
    
    return imgs_train, imgs_mask_train, imgs_patient_train, imgs_test
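Most of the snippets below assume a preprocess() helper that is not shown. A minimal sketch, assuming the common pattern of resizing every image to a fixed (img_rows, img_cols) grid and appending a trailing channel axis; the target size and channel layout are assumptions, and the real projects' implementations may differ:

import numpy as np
from skimage.transform import resize

img_rows, img_cols = 96, 96  # assumed target size

def preprocess(imgs):
    # resize each image, keeping the original intensity range
    resized = np.ndarray((imgs.shape[0], img_rows, img_cols), dtype=np.uint8)
    for i in range(imgs.shape[0]):
        resized[i] = resize(imgs[i], (img_rows, img_cols), preserve_range=True)
    return resized[..., np.newaxis]  # shape (n, img_rows, img_cols, 1)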
Example #3
def train_unet(data_path, save_path, basename="", weight_load="",
               valid_size=.2, batch_size=32, nb_epoch=10, dropout=.25):

    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train, imgs_patient_train = load_train_data(data_path)

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    
    random_state = 51
    X_train, X_valid, Y_train, Y_valid = train_test_split(imgs_train, imgs_mask_train, 
                                                          test_size=valid_size, random_state=random_state)

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet(dropout)
    model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)
    
    # not something we need to do every time...
    if weight_load:
        model.load_weights(weight_load)

    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, 
              verbose=1, shuffle=True, callbacks=[model_checkpoint])
    
    
    Y_train_pred = model.predict(X_train, verbose=1)
    if (valid_size > 0):
        Y_valid_pred = model.predict(X_valid, verbose=1)

    
    score_training = np_dice_coef(Y_train, Y_train_pred)
    if (valid_size > 0):
        score_validation = np_dice_coef(Y_valid, Y_valid_pred)
    print('Score on training set: dice_coef = ', score_training)
    if (valid_size > 0):
        print('Score on validation set: dice_coef = ', score_validation)
    
    weight_save = save_path + '/unet' + basename + '.hdf5'
    if weight_save:
        model.save_weights(weight_save)
    
    return mean, std
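np_dice_coef() used above is not shown. A minimal NumPy sketch of the smoothed Dice coefficient (Example #19 below computes the same overlap ratio by hand); the function name comes from the snippet, the smoothing constant is an assumption:

import numpy as np

def np_dice_coef(y_true, y_pred, smooth=1.0):
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)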
Example #4
def load_data():
    imgs_train, imgs_mask_train = load_train_data()
    imgs_train = imgs_train.astype('float32')
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    print('We have ',imgs_train.shape[0],' training samples')
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = imgs_test.astype('float32')
    print('We have ',imgs_test.shape[0],' test samples')
    return imgs_train, imgs_mask_train, imgs_test, imgs_id_test
def train_and_predict():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    imgs_has_mask_train = (imgs_mask_train.reshape(imgs_mask_train.shape[0], -1).max(axis=1) > 0) * 1

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)
    remote_monitor = RemoteMonitor(root='http://localhost:9000')

    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, [imgs_mask_train, imgs_has_mask_train], batch_size=32, nb_epoch=20, verbose=1, shuffle=True,
              callbacks=[model_checkpoint, remote_monitor])

    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('unet.hdf5')

    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)[0]  # keep only the first output (the mask head)
    np.save('imgs_mask_test.npy', imgs_mask_test)
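The fit call above passes two targets, so this get_unet() must return a two-output model (a mask map plus a scalar has-mask flag). A hypothetical compile for such a model, shown only to illustrate the multi-output wiring; the losses and weights here are assumptions, not the project's own:

from keras.optimizers import Adam

model.compile(optimizer=Adam(1e-5),
              loss=['binary_crossentropy', 'binary_crossentropy'],  # one loss per output
              loss_weights=[1.0, 0.5])                              # assumed head weighting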
Example #6
def train_and_predict():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)

    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, imgs_mask_train, batch_size=32, nb_epoch=50, verbose=1, shuffle=True,
              callbacks=[model_checkpoint])

    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    tst_mean = np.mean(imgs_test)
    tst_std = np.std(imgs_test)
    imgs_test -= tst_mean
    imgs_test /= tst_std

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('unet.hdf5')

    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
def train_and_predict():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    n_fold = 5
    k_fold_train(imgs_train, imgs_mask_train, n_fold=n_fold)

    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model = get_unet()
    results = []
    for i in range(n_fold):
        model.load_weights('unet_fold%s.hdf5' % i)
        print('-' * 30)
        print('%s Predicting masks on test data...' % i)
        print('-' * 30)
        imgs_mask_test = model.predict(imgs_test, verbose=1)
        results.append(imgs_mask_test)
    imgs_mask_test = reduce(lambda x, y: x + y, results)/n_fold
    np.save('imgs_mask_test_nfold.npy', imgs_mask_test)
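A Python 3 note on the fold averaging above: reduce must be imported from functools, and the same average can be written directly in NumPy:

from functools import reduce  # required on Python 3

imgs_mask_test = np.mean(results, axis=0)  # equivalent to reduce(lambda x, y: x + y, results) / n_fold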
Example #8
def visualize():
    from data import load_train_data
    imgs_train, imgs_train_mask = load_train_data()
    imgs_train_pred=np.load('imgs_train_pred.npy')
    total=imgs_train.shape[0]
    for i in range(total):
        # augmentation(imgs_train[i,0])
        plt.subplot(221)
        plt.imshow(imgs_train[i,0])
        plt.subplot(222)
        plt.imshow(imgs_train_mask[i,0])
        plt.subplot(223)
        plt.imshow(imgs_train_pred[i,0])
        img = prep(imgs_train_pred[i,0])
        plt.subplot(224)
        plt.imshow(img)
        plt.show()
unet.add(concatenated_de_convolution_layer(64))
unet.add(convolution_layer(64))
unet.add(convolution_layer(64))

unet.add(concatenated_de_convolution_layer(32))
unet.add(convolution_layer(32))
unet.add(convolution_layer(32))

unet.add(convolution_layer(1, kernel=(1, 1), activation='sigmoid'))

unet.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-5),
              loss=dice_coefficient_loss,
              metrics=[dice_coefficient])


x_train, y_train_mask = load_train_data()

x_train = preprocess(x_train)
y_train_mask = preprocess(y_train_mask)

x_train = x_train.astype('float32')
mean = np.mean(x_train)
std = np.std(x_train)

x_train -= mean
x_train /= std

y_train_mask = y_train_mask.astype('float32')
y_train_mask /= 255.

unet.fit(x_train, y_train_mask, batch_size=32, epochs=20, verbose=1, shuffle=True)
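The dice_coefficient and dice_coefficient_loss referenced in the compile above are not shown; a minimal Keras-backend sketch of the standard smoothed soft Dice (the smoothing constant is an assumption):

from tensorflow.keras import backend as K

def dice_coefficient(y_true, y_pred, smooth=1.0):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coefficient_loss(y_true, y_pred):
    return 1. - dice_coefficient(y_true, y_pred)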
Example #10
def main(argv=None):
    print('start running...')
    start_time = time.time()

    ############################################ build graph ############################################

    monnet = mn.MonNet(FLAGS)

    if int(FLAGS.train) + int(FLAGS.test) + int(FLAGS.encode) != 1:
        print('please specify \'train\' or \'test\' or \'encode\'')
        return

    views = vw.Views(os.path.join(FLAGS.data_dir, 'view', FLAGS.view_file))

    if FLAGS.train:
        train_names, train_sources, train_targets, train_masks, train_angles, num_train_shapes = data.load_train_data(
            FLAGS, views)
        valid_names, valid_sources, valid_targets, valid_masks, valid_angles, num_valid_shapes = data.load_validate_data(
            FLAGS, views)

        with tf.variable_scope("monnet") as scope:
            monnet.build_network(
                names=train_names,
                sources=train_sources,
                targets=train_targets,
                masks=train_masks,
                angles=train_angles,
                views=views,
                is_training=True)
            scope.reuse_variables()  # sharing weights
            monnet.build_network(
                names=valid_names,
                sources=valid_sources,
                targets=valid_targets,
                masks=valid_masks,
                angles=valid_angles,
                views=views,
                is_validation=True)
    elif FLAGS.test:
        test_names, test_sources, test_targets, test_masks, test_angles, num_test_shapes = data.load_test_data(FLAGS,
                                                                                                               views)

        with tf.variable_scope("monnet") as scope:
            monnet.build_network(
                names=test_names,
                sources=test_sources,
                targets=test_targets,
                masks=test_masks,
                angles=test_angles,
                views=views,
                is_testing=True)
    elif FLAGS.encode:
        encode_names, encode_sources, encode_targets, encode_masks, encode_angles, num_encode_shapes = data.load_encode_data(
            FLAGS, views)

        with tf.variable_scope("monnet") as scope:
            monnet.build_network(
                names=encode_names,
                sources=encode_sources,
                targets=encode_targets,
                masks=encode_masks,
                angles=encode_angles,
                views=views,
                is_encoding=True)

    ############################################ compute graph ############################################

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_fraction)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          log_device_placement=False,
                                          allow_soft_placement=True)) as sess:

        if FLAGS.train:
            monnet.train(sess, views, num_train_shapes, num_valid_shapes)
        elif FLAGS.test:
            monnet.test(sess, views, num_test_shapes)
        elif FLAGS.encode:
            monnet.encode(sess, views, num_encode_shapes)

        sess.close()

    duration = time.time() - start_time
    print('total running time: %.1f\n' % duration)
Example #11
    def AugmentDS(self, Augentries):
        datagen = ImageDataGenerator(
            #            featurewise_center=False,
            #            featurewise_std_normalization=False,
            #            samplewise_center=False,
            #            samplewise_std_normalization=False,
            rescale=None,
            rotation_range=3,
            width_shift_range=0.08,
            height_shift_range=0.08,
            shear_range=0.07,
            zoom_range=0.07,
            horizontal_flip=True,
            vertical_flip=True,
            fill_mode='constant',
            cval=0.)
        args = self.arggen(Augentries)
        #        trimgs,labimgs,data_path,num
        data_path, fext, num, cr = args[0], args[1], int(args[2]), float(args[3])
        create_train_data(data_path, train_p, label_p, fext)
        imgs = load_train_data(data_path, train_npy, labels_npy)
        nLabels = np.max(imgs[1])
        trimgs, labimgs = imgs[0], imgs[1]
        imgshape = trimgs[0].shape
        print('-' * 30)
        print('Augmenting train and labels dataset: ', num,
              'replica per image...')
        print('-' * 30)
        #    seed = np.random.randint(10000)
        seed = np.random.randint(10000, size=2 * len(trimgs) * num)
        tmpf = 'tmp'
        if tmpf in sorted(os.listdir(os.path.join(data_path, train_p))):
            shutil.rmtree(os.path.join(data_path, train_p, tmpf),
                          ignore_errors=True)
            shutil.rmtree(os.path.join(data_path, label_p, tmpf),
                          ignore_errors=True)
        os.makedirs(os.path.join(data_path, train_p, tmpf))
        os.makedirs(os.path.join(data_path, label_p, tmpf))
        global batchdata
        batchdata = []
        j = 0
        for x in trimgs:
            x[x == 0] = 1
            x = x.reshape((1, ) + x.shape + (1, ))
            # the .flow() command below generates batches of randomly transformed images
            # and saves the results to the `preview/` directory
            i = 0

            for batch in datagen.flow(x, batch_size=1, seed=seed[j]):
                self.save_tif(data_path, os.path.join(train_p, tmpf), 'img',
                              batch[0, :, :, 0].astype('uint8'),
                              seed[i + j * 2 * num], fext)
                i += 1
                if i >= 2 * num:
                    break  # otherwise the generator would loop indefinitely
            j += 1
        j = 0
        for y in labimgs:
            y = y.reshape((1, ) + y.shape + (1, ))
            i = 0
            for batch in datagen.flow(y, batch_size=1, seed=seed[j]):

                self.save_tif(data_path, os.path.join(label_p, tmpf), 'img',
                              batch[0, :, :, 0].astype('uint8'),
                              seed[i + j * 2 * num], fext)
                batchdata.append(batch[0, :, :, 0])
                i += 1
                if i >= 2 * num:
                    break  # otherwise the generator would loop indefinitely
            j += 1
        create_train_data(data_path, os.path.join(train_p, tmpf),
                          os.path.join(label_p, tmpf), fext)
        tmpimgs = load_train_data(data_path, train_npy, labels_npy)
        tmptr = tmpimgs[0]
        tmplab = tmpimgs[1]
        print(imgshape, cr)
        lencrop = (int(((imgshape[0] * cr) // 16) * 16),
                   int(((imgshape[1] * cr) // 16) * 16))
        print(lencrop)
        delta = imgshape[0] - lencrop[0], imgshape[1] - lencrop[1]
        print(delta)
        seltr = []
        sellab = []
        j = 0
        for i, img in enumerate(tmptr):
            tmpres = crop_no_black(tmptr[i], tmplab[i], lencrop)
            if tmpres is not None:
                seltr.append(tmpres[0])
                sellab.append(tmpres[1])
                j += 1
                if j > len(trimgs) * (num + 1):
                    break
        seltr = np.array(seltr)
        sellab = np.array(sellab)
        print(seltr.shape, sellab.shape)
        np.save(os.path.join(data_path, 'imgs_train.npy'), seltr)
        print('Augmented train data saved to:',
              os.path.join(data_path, 'imgs_train.npy'))
        np.save(os.path.join(data_path, 'imgs_labels.npy'), sellab)
        print('Augmented label data saved to:',
              os.path.join(data_path, 'imgs_labels.npy'))
        if selfold in sorted(os.listdir(os.path.join(data_path, train_p))):
            shutil.rmtree(os.path.join(data_path, train_p, selfold),
                          ignore_errors=True)
            shutil.rmtree(os.path.join(data_path, label_p, selfold),
                          ignore_errors=True)
        os.makedirs(os.path.join(data_path, train_p, selfold))
        os.makedirs(os.path.join(data_path, label_p, selfold))
        for i in range(len(seltr)):
            self.save_tif(data_path, os.path.join(train_p, selfold), 'img',
                          seltr[i], i, fext)
            self.save_tif(data_path, os.path.join(label_p, selfold), 'img',
                          sellab[i], i, fext)
#        create_train_data(data_path,train_p,label_p,fext)
        if tmpf in sorted(os.listdir(os.path.join(data_path, train_p))):
            shutil.rmtree(os.path.join(data_path, train_p, tmpf),
                          ignore_errors=True)
            shutil.rmtree(os.path.join(data_path, label_p, tmpf),
                          ignore_errors=True)
        print('Done')
        return
Example #12
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()
    existence_mask = get_object_existence(imgs_mask_train)

    imgs_train = preprocess(imgs_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    y_train = existence_mask.astype(np.uint8)

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint(
        'weights.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss')
    model_save_best = ModelCheckpoint('weights.h5',
                                      monitor='val_loss',
                                      save_best_only=True)

    #early_s = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
    model.load_weights('weights.h5')

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    # TODO: experiment with different batch sizes
    # TODO: try k-fold ensemble training
    model.fit(imgs_train,
              y_train,
              batch_size=32,
              nb_epoch=20,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint, model_save_best])
    exit  # no-op as written; 'return' (or 'exit()') would be needed to actually stop here

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.h5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)

    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
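get_object_existence() above is not shown; judging from the reshape/max trick used in Example #4, a minimal equivalent might be:

import numpy as np

def get_object_existence(masks):
    # 1 where a mask contains any foreground pixel, else 0
    return (masks.reshape(masks.shape[0], -1).max(axis=1) > 0).astype(np.uint8)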
Example #13
def train_and_predict(bit):
    print('-' * 30)
    print('Loading train data (bit = ' + str(bit) + ') ...')
    print('-' * 30)
    imgs_bit_train, imgs_bit_mask_train, _ = load_train_data(bit)

    print(imgs_bit_train.shape[0], imgs_bit_mask_train.shape[0])

    imgs_bit_train = imgs_bit_train.astype('float32')
    mean = np.mean(imgs_bit_train)
    std = np.std(imgs_bit_train)

    imgs_bit_train -= mean
    imgs_bit_train /= std

    imgs_bit_mask_train = imgs_bit_mask_train.astype('float32')
    imgs_bit_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model (bit = ' + str(bit) + ') ...')
    print('-' * 30)
    model = get_conv(f=16)

    csv_logger = CSVLogger('log_conv_' + str(bit) + '.csv')
    model_checkpoint = ModelCheckpoint('weights_conv_' + str(bit) + '.h5',
                                       monitor='val_loss',
                                       save_best_only=True)

    print('-' * 30)
    print('Fitting model (bit = ' + str(bit) + ') ...')
    print('-' * 30)

    model.fit(imgs_bit_train,
              imgs_bit_mask_train,
              batch_size=32,
              epochs=epochs,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[csv_logger, model_checkpoint])

    print('-' * 30)
    print('Loading and preprocessing test data (bit = ' + str(bit) + ') ...')
    print('-' * 30)

    imgs_bit_test, imgs_mask_test, imgs_bit_id_test = load_test_data(bit)

    imgs_bit_test = imgs_bit_test.astype('float32')
    imgs_bit_test -= mean
    imgs_bit_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights_conv_' + str(bit) + '.h5')

    print('-' * 30)
    print('Predicting masks on test data (bit = ' + str(bit) + ') ...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_bit_test, verbose=1)

    if bit == 8:
        print('-' * 30)
        print('Saving predicted masks to files...')
        print('-' * 30)
        pred_dir = 'preds_8'
        if not os.path.exists(pred_dir):
            os.mkdir(pred_dir)
        for image, image_id in zip(imgs_mask_test, imgs_bit_id_test):
            image = (image[:, :, 0] * 255.).astype(np.uint8)
            imsave(
                os.path.join(pred_dir,
                             str(image_id).split('/')[-1] + '_pred_conv.png'),
                image)

    elif bit == 16:
        print('-' * 30)
        print('Saving predicted masks to files...')
        print('-' * 30)
        pred_dir = 'preds_16'
        if not os.path.exists(pred_dir):
            os.mkdir(pred_dir)
        for image, image_id in zip(imgs_mask_test, imgs_bit_id_test):
            image = (image[:, :, 0] * 255.).astype(np.uint8)
            imsave(
                os.path.join(pred_dir,
                             str(image_id).split('/')[-1] + '_pred_conv.png'),
                image)
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()
    imgs_present = load_nerve_presence()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    # centering and standardising the images
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)
    std = np.std(imgs_train)
    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to be in {0, 1} instead of {0, 255}

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    # load model - the Learning rate scheduler choice is most important here
    model = get_unet(optimizer=OPTIMIZER, pars=PARS)

    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
    early_stopping = EarlyStopping(patience=5, verbose=1)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)

    if PARS['outputs'] == 1:
        imgs_labels = imgs_mask_train
    else:
        imgs_labels = [imgs_mask_train, imgs_present]

    model.fit(imgs_train, imgs_labels,
              batch_size=128, epochs=50,
              verbose=1, shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint, early_stopping])

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.h5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)

    imgs_mask_test = model.predict(imgs_test, verbose=1)

    if PARS['outputs'] == 1:
        np.save('imgs_mask_test.npy', imgs_mask_test)
    else:
        np.save('imgs_mask_test.npy', imgs_mask_test[0])
        np.save('imgs_mask_test_present.npy', imgs_mask_test[1])
Example #15
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()
    print(imgs_mask_train.shape)
    imgs_mask_train = imgs_mask_train.reshape(imgs_mask_train.shape[0],
                                              img_rows, img_cols, 1)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print(imgs_mask_train.shape)
    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_model(imgs_train)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=32,
              nb_epoch=20,
              verbose=1,
              shuffle=True)

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id, imgs_size = load_test_data()

    mean = np.mean(imgs_test)  # mean for data centering
    std = np.std(imgs_test)  # std for data normalization
    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print(imgs_test.shape)
    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    imgs_mask_test *= 255
    i = 0

    for img, name, size in zip(imgs_mask_test, imgs_id, imgs_size):
        img = cv2.resize(img,
                         (int(size.split(',')[1]), int(size.split(',')[0])))
        ret, img = cv2.threshold(img, 200, 255, cv2.THRESH_BINARY)
        cv2.imwrite("Data/test_output/" + str(name) + ".jpg", img)

        #cv2.imwrite("Data/output/" + str(name) + ".jpg", img) #  было

        i += 1

    print(imgs_mask_test.shape)
    np.save('imgs_mask_test.npy', imgs_mask_test)
Example #16
def train(fold, plane, batch_size, nb_epoch, init_lr):
    """
    train a U-Net model with data from load_train_data()

    Parameters
    ----------
    fold : string
        which fold of the 4-fold split to run; one of 0/1/2/3

    plane : char
        which plane to train on; one of 'X'/'Y'/'Z'

    batch_size : int
        size of mini-batch

    nb_epoch : int
        number of epochs to train NN

    init_lr : float
        initial learning rate
    """

    print("number of epoch: ", nb_epoch)
    print("learning rate: ", init_lr)

    # --------------------- load and preprocess training data -----------------
    print('-' * 80)
    print('         Loading and preprocessing train data...')
    print('-' * 80)

    imgs_train, imgs_mask_train = load_train_data(fold, plane)

    imgs_row = imgs_train.shape[1]
    imgs_col = imgs_train.shape[2]

    print(imgs_row, imgs_col)

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    imgs_mask_train = imgs_mask_train.astype('float32')

    # ---------------------- Create, compile, and train model ------------------------
    print('-' * 80)

    print('		Creating and compiling model...')
    print('-' * 80)

    model = get_deform_unet(imgs_row,
                            imgs_col,
                            pool_size=(2, 2, 2),
                            init_lr=init_lr)
    print(model.summary())

    print('-' * 80)
    print('		Fitting model...')
    print('-' * 80)

    ver = 'unet_deform_fd%s_%s_ep%s_lr%s.csv' % (fold, plane, nb_epoch,
                                                 init_lr)
    csv_logger = CSVLogger(log_path + ver)
    model_checkpoint = ModelCheckpoint(model_path + ver + ".h5",
                                       monitor='loss',
                                       save_best_only=False,
                                       period=10)

    history = model.fit(imgs_train,
                        imgs_mask_train,
                        batch_size=batch_size,
                        epochs=nb_epoch,
                        verbose=1,
                        shuffle=True,
                        callbacks=[model_checkpoint, csv_logger])
Example #17
import numpy as np
from train import get_unet, preprocess

from data import load_test_data, load_train_data

imgs_train, imgs_mask_train = load_train_data()
imgs_train = preprocess(imgs_train)
imgs_train = imgs_train.astype('float32')
mean = np.mean(imgs_train)
std = np.std(imgs_train)

imgs_test, imgs_id_test = load_test_data()
imgs_test = preprocess(imgs_test)
imgs_test = imgs_test.astype('float32')
imgs_test -= mean
imgs_test /= std

model = get_unet()
model.load_weights("final.h5")
imgs_mask_test = model.predict(imgs_test, verbose=1)

np.save('imgs_mask_test_final.npy', imgs_mask_test)
Example #18
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(200, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    # optimizer=SGD
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy', precision, recall, f1score])

    return model


if __name__ == '__main__':
    x_train, y_train, train_ids = load_train_data()
    x_test, y_test, test_ids = load_test_data()

    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)
    input_shape = (img_rows, img_cols, channels)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print('y_train shape:', y_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
Example #19
File: unet.py  Project: sherridxy/unet
def train_and_predict():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)

    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, imgs_mask_train, batch_size=32, nb_epoch=20, verbose=1, shuffle=True,
              callbacks=[model_checkpoint])

    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_mask_test_truth = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('unet.hdf5')

    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test_result = model.predict(imgs_test, verbose=1)
    
    imgs_mask_test_result = postprocess(imgs_mask_test_result)
    # test results are converted to 0-255 due to resizing
    print(imgs_mask_test_result.max())

    imgs_mask_test_result = imgs_mask_test_result.astype('float32')
    imgs_mask_test_result /= 255
    print(imgs_mask_test_truth.shape)

    imgs_mask_test_truth = imgs_mask_test_truth.astype('float32')
    imgs_mask_test_truth /= 255

    test_truth = imgs_mask_test_truth.flatten()
    test_result = imgs_mask_test_result.flatten()

    print(test_result.shape)
    print(test_truth.shape)
    intersect = test_result * test_truth
    dice_score = (2. * intersect.sum()) / (test_truth.sum() + test_result.sum())
    print('Dice coefficient on testing data is : {0:.3f}.'.format(dice_score))
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = np.array([clahe(img) for img in imgs_train])

    imgs_train = preprocess(imgs_train)

    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    y_bin = np.array([mask_not_blank(mask) for mask in imgs_mask_train])

    # X_train,X_test,y_train,y_test = train_test_split(imgs_train,imgs_mask_train,test_size=0.2,random_state=seed)

    skf = StratifiedKFold(y_bin, n_folds=10, shuffle=True, random_state=seed)
    for ind_tr, ind_te in skf:
        X_train = imgs_train[ind_tr]
        X_test = imgs_train[ind_te]
        y_train = imgs_mask_train[ind_tr]
        y_test = imgs_mask_train[ind_te]
        break

    # X_train_rotate = get_rotation(X_train)
    # y_train_rotate  = get_rotation(y_train)
    # X_train = np.concatenate((X_train,X_train_rotate),axis=0)
    # y_train = np.concatenate((y_train,y_train_rotate),axis=0)
    # print(X_train.shape,y_train.shape)

    # X_train_rotate = get_rotation(X_train,degree=22.5)
    # y_train_rotate  = get_rotation(y_train,degree=22.5)
    # X_train = np.concatenate((X_train,X_train_rotate),axis=0)
    # y_train = np.concatenate((y_train,y_train_rotate),axis=0)
    # print(X_train.shape,y_train.shape)

    # X_train_rotate = get_rotation(X_train,degree=11.25)
    # y_train_rotate  = get_rotation(y_train,degree=11.25)
    # X_train = np.concatenate((X_train,X_train_rotate),axis=0)
    # y_train = np.concatenate((y_train,y_train_rotate),axis=0)
    # print(X_train.shape,y_train.shape)

    # X_train_flip = X_train[:,:,:,::-1]
    # y_train_flip = y_train[:,:,:,::-1]
    # X_train = np.concatenate((X_train,X_train_flip),axis=0)
    # y_train = np.concatenate((y_train,y_train_flip),axis=0)

    # X_train_flip = X_train[:,:,::-1,:]
    # y_train_flip = y_train[:,:,::-1,:]
    # X_train = np.concatenate((X_train,X_train_flip),axis=0)
    # y_train = np.concatenate((y_train,y_train_flip),axis=0)

    imgs_train = X_train
    imgs_valid = X_test
    imgs_mask_train = y_train
    imgs_mask_valid = y_test

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model_name = 'unet_seed_1024_epoch_30_batch_8_aug_rotate_64_64_shiftbn_sgd_srelu_res_atrous.hdf5'
    model_checkpoint = ModelCheckpoint('E:\\UltrasoundNerve\\' + model_name,
                                       monitor='loss',
                                       save_best_only=True)
    plot(model,
         to_file='E:\\UltrasoundNerve\\%s.png' %
         model_name.replace('.hdf5', ''),
         show_shapes=True)
    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    augmentation = False
    batch_size = 128
    nb_epoch = 20
    load_model = False
    use_all_data = False

    if use_all_data:
        imgs_train = np.concatenate((imgs_train, imgs_valid), axis=0)
        imgs_mask_train = np.concatenate((imgs_mask_train, imgs_mask_valid),
                                         axis=0)

    # model.load_weights('E:\\UltrasoundNerve\\'+"unet_seed_1024_epoch_20_aug_rotate_64_80_shiftbn_sgd_srelu_plus5.hdf5")
    # model.save_weights('E:\\UltrasoundNerve\\'+'unet_seed_1024_epoch_20_aug_rotate_64_80_shiftbn_sgd_srelu_plus10.npy')
    # return

    if load_model:
        model.load_weights('E:\\UltrasoundNerve\\' + model_name)
    if not augmentation:
        model.fit(imgs_train,
                  imgs_mask_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  verbose=1,
                  shuffle=True,
                  callbacks=[model_checkpoint],
                  validation_data=[imgs_valid, imgs_mask_valid])
        pass
    else:

        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=
            False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=
            45,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=
            0.0,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=
            0.0,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(imgs_train)
        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(imgs_train,
                                         imgs_mask_train,
                                         batch_size=batch_size),
                            samples_per_epoch=imgs_train.shape[0],
                            nb_epoch=nb_epoch,
                            callbacks=[model_checkpoint],
                            validation_data=(imgs_valid, imgs_mask_valid))

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = np.array([clahe(img) for img in imgs_test])

    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('E:\\UltrasoundNerve\\' + model_name)

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
Example #21
def train(folder=0,
          weights_saving_path='weights',
          lr=0.001,
          weights=None,
          resuming_iter=0):
    net = ResUNet(pretrained=True)
    net.cuda()

    if weights is not None:
        print('Resume training from iter {:d}.'.format(resuming_iter))
        state_dict = torch.load(weights)['state_dicts']
        net.load_state_dict(state_dict)

    train_images, train_depths, train_masks = load_train_data(folder)
    val_images, val_depths, val_masks = load_val_data(folder)

    batch_size = 32
    one_epoch_iters = len(train_images) // batch_size
    saving_interval = one_epoch_iters // 2
    max_iters = one_epoch_iters * 500

    optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-6)

    bce_loss_layer = torch.nn.BCELoss()
    bce_loss_layer.cuda()

    if not os.path.exists(weights_saving_path):
        os.makedirs(weights_saving_path)

    print('batch_size = {:d}'.format(batch_size))
    print('one epoch iterations = {:d}'.format(one_epoch_iters))
    print('saving interval = {:d}'.format(saving_interval))
    print('max iterations = {:d} (~ {:d} epochs)'.format(
        max_iters, max_iters * batch_size // len(train_images)))
    print('lr = {:f}'.format(lr))

    time_str = strftime('%Y-%m-%d_%H-%M-%S', gmtime())
    file_name_suffix = '{:d}_{:s}'.format(folder, time_str)

    losses_file = open('losses_' + file_name_suffix + '.log', 'w')
    avg_losses_file = open('avg_losses_' + file_name_suffix + '.log', 'w')
    scores_file = open('scores_' + file_name_suffix + '.log', 'w')
    val_losses_file = open('val_losses_' + file_name_suffix + '.log', 'w')

    moving_avg_num = 50
    losses = []
    iter_idx = resuming_iter
    val_score = 0
    loss_changed = False

    if weights is not None:
        loss_name = 'bce'
        val_loss, val_score = validate(net, val_images, val_depths, val_masks,
                                       loss_name)

    while iter_idx < max_iters:
        # randomly pick one batch
        picked_samples = np.random.random_integers(0,
                                                   len(train_images) - 1,
                                                   batch_size)
        batch_images = train_images[picked_samples]
        batch_depths = train_depths[picked_samples]
        batch_masks = train_masks[picked_samples]

        input1 = torch.cuda.FloatTensor(batch_images)
        # input2 = torch.cuda.FloatTensor(batch_depths)

        targets = torch.cuda.FloatTensor(batch_masks)
        logits = net.forward(input1)

        if not loss_changed and (val_score < 0.81
                                 and iter_idx <= 30 * one_epoch_iters):
            # if weights is None and iter_idx < first_stage_iters:
            loss = bce_loss_layer.forward(F.sigmoid(logits), targets)
            loss_name = 'bce'
        elif not loss_changed:
            loss = criterion(logits, targets)
            loss_name = 'lovasz'
            losses = []
            optimizer = torch.optim.Adam(net.parameters(),
                                         lr=lr * .5,
                                         weight_decay=1e-6)
            loss_changed = True
        else:
            # loss has already changed, so we don't care about the val score
            loss = criterion(logits, targets)
            loss_name = 'lovasz'

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        iter_idx += 1

        loss = loss.item()
        losses.append(loss)
        moving_avg_loss = sum(losses[-moving_avg_num:])
        moving_avg_loss /= len(losses[-moving_avg_num:])
        losses = losses[-moving_avg_num:]

        losses_file.write('{:d} {:f}\n'.format(iter_idx, loss))
        avg_losses_file.write('{:d} {:f}\n'.format(iter_idx, moving_avg_loss))

        if iter_idx % saving_interval == 0:
            print(
                '[Iter {:8d}] [{:s} loss {:8.4f}]  [avg_loss {:8.4f}] '.format(
                    iter_idx, loss_name, loss, moving_avg_loss),
                end='',
                flush=True)  # flush so the progress line appears immediately
            val_loss, val_score = validate(net, val_images, val_depths,
                                           val_masks, loss_name)
            print('[val_{:s}_loss {:8.4f}] [val_score {:8.4f}]'.format(
                loss_name, val_loss, val_score))
            saving_name = os.path.join(
                weights_saving_path,
                'iter_{:d}_loss_{:.4f}_score_{:.4f}.weights'.format(
                    iter_idx, val_loss, val_score))
            torch.save({'state_dicts': net.state_dict()}, saving_name)

            val_losses_file.write('{:d} {:f}\n'.format(iter_idx, val_loss))
            scores_file.write('{:d} {:f}\n'.format(iter_idx, val_score))

        else:
            print('[Iter {:8d}] [{:s} loss {:8.4f}]  [avg_loss {:8.4f}]\r'.
                  format(iter_idx, loss_name, loss, moving_avg_loss),
                  end='',
                  flush=True)  # flush so the carriage-return progress line refreshes

        if iter_idx % 20 == 0:
            # flush the log buffer, so we can plot the results in time.
            losses_file.flush()
            avg_losses_file.flush()
            scores_file.flush()
            val_losses_file.flush()
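Two small modernisation notes for this snippet, assuming newer library versions: np.random.random_integers is deprecated in NumPy, and F.sigmoid is deprecated in PyTorch. Equivalent replacements:

# randint's upper bound is exclusive, so len(train_images) matches random_integers' len - 1
picked_samples = np.random.randint(0, len(train_images), batch_size)

# torch.sigmoid replaces the deprecated F.sigmoid
loss = bce_loss_layer(torch.sigmoid(logits), targets)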
Example #22
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    plot_model(model, to_file='model.png', show_shapes=True)
    model_checkpoint = ModelCheckpoint('unet.hdf5',
                                       monitor='loss',
                                       save_best_only=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    datagen = ImageDataGenerator(
        rotation_range=5,
        vertical_flip=True,
        horizontal_flip=True,
    )
    model.fit_generator(datagen.flow(imgs_train,
                                     imgs_mask_train,
                                     batch_size=32,
                                     shuffle=True),
                        samples_per_epoch=len(imgs_train),
                        nb_epoch=120,
                        verbose=1,
                        callbacks=[model_checkpoint])

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('unet.hdf5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
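One caveat on the augmentation above: ImageDataGenerator.flow(x, y) applies its random geometric transforms to x only, so masks passed as y are not rotated or flipped with their images. A sketch of the usual remedy, two generators driven by the same seed and zipped together (keeping this snippet's Keras 1 style fit_generator arguments):

seed = 1
image_datagen = ImageDataGenerator(rotation_range=5, vertical_flip=True, horizontal_flip=True)
mask_datagen = ImageDataGenerator(rotation_range=5, vertical_flip=True, horizontal_flip=True)

image_flow = image_datagen.flow(imgs_train, batch_size=32, shuffle=True, seed=seed)
mask_flow = mask_datagen.flow(imgs_mask_train, batch_size=32, shuffle=True, seed=seed)

model.fit_generator(zip(image_flow, mask_flow),
                    samples_per_epoch=len(imgs_train),
                    nb_epoch=120,
                    verbose=1,
                    callbacks=[model_checkpoint])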
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    y_hat_train = np.max(imgs_mask_train, axis=(1, 2, 3))
    print("total:", y_hat_train.shape, np.unique(y_hat_train), "label-1:", np.sum(y_hat_train))

    y_hat_train_sums = np.sum(imgs_mask_train, axis=(1, 2, 3))
    print(y_hat_train_sums)
    y_hat_train_sums_nonzero_ids = np.nonzero(y_hat_train_sums)[0]
    y_hat_train_sums = y_hat_train_sums[y_hat_train_sums_nonzero_ids]
    print(y_hat_train_sums.shape, np.min(y_hat_train_sums), np.max(y_hat_train_sums), np.mean(y_hat_train_sums))
    # y = np.bincount(y_hat_train_sums.astype('int32'))
    # ii = np.nonzero(y)[0]
    # count = y[ii]
    # from matplotlib import pyplot as plt
    # plt.plot(ii, count)
    # plt.show()
    # raw_input("pause, input any to continue")

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    n_fold = 5
    k_fold_train(imgs_train, y_hat_train, n_fold=n_fold)

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    # model = get_unet()
    model = get_keras_example_net()
    results = []
    for i in range(n_fold):
        model.load_weights('unet_fold%s.hdf5' % i)
        print('-' * 30)
        print('%s Predicting masks on test data...' % i)
        print('-' * 30)
        imgs_mask_test = model.predict(imgs_test, verbose=1)
        results.append(imgs_mask_test)
    imgs_mask_test = reduce(lambda x, y: x + y, results) / n_fold
    np.save('imgs_mask_test_nfold.npy', imgs_mask_test)
Example #24
def train_and_predict():

    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()
    imgs_mask_train = np.bool_(imgs_mask_train)
    imgs_mask_train = ~imgs_mask_train  # switch 1 and 0 so in format that works correctly with dice coefficient

    # imgs_train = preprocess_images(imgs_train)
    # imgs_mask_train = preprocess_masks(imgs_mask_train)

    # imgs_train = imgs_train.astype('float32')
    # mean = np.mean(imgs_train, axis=3)  # mean for data centering
    # std = np.std(imgs_train, axis=3)  # std for data normalization

    # imgs_train -= mean
    # imgs_train /= std

    # imgs_mask_train = imgs_mask_train.astype('float32')
    # imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint(weight_path,
                                       monitor='val_loss',
                                       save_best_only=True)
    tensorboard = TensorBoard(log_dir='./logs/3',
                              histogram_freq=1,
                              write_graph=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(
        imgs_train,
        imgs_mask_train,
        batch_size=32,
        nb_epoch=20,
        verbose=1,
        shuffle=True,
        validation_split=0,  # <- changed from 0.2 because of ResourceExhaustedError during val
        callbacks=[model_checkpoint, tensorboard])

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    # imgs_test = preprocess_imgs(imgs_test)

    # imgs_test = imgs_test.astype('float32')
    # imgs_test -= mean
    # imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights(weight_path)

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('rg35K_test_mask_preds3.npy',
            imgs_mask_test)  # name the file according to annotation type

    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = prediction_path + 'preds_rg/'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        savepath = os.path.join(pred_dir, str(image_id) + '_pred.png')
        savepath = savepath.replace('gs/', '')
        imsave(savepath, image)
Example #25
def problem2():
    data = load_train_data()
    graph_p2(solve2(data))
Example #26
File: train.py  Project: eglrp/U-net-4
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    log_filepath = '/tmp/logs/run_c'
    model = get_unet()

    # Visualize model
    plot_model(model, 'u-net-model-architecture.png', show_shapes=True)
    #model_file_format = 'model.{epoch:03d}.hdf5'
    #print model_file_format

    model_checkpoint = ModelCheckpoint('weights.h5',
                                       monitor='val_loss',
                                       save_best_only=True)
    tb_cb = TensorBoard(log_dir=log_filepath,
                        write_images=False,
                        histogram_freq=1,
                        write_graph=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=32,
              nb_epoch=20,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint, tb_cb])

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.h5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)

    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    else:
        shutil.rmtree(pred_dir)
        os.mkdir(pred_dir)

    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
Example #27
def train_and_predict():
    print("-" * 30)
    print("Loading and preprocessing train data")
    print("-" * 30)

    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    imgs_mask_train = imgs_mask_train.astype('float32')

    mean = np.mean(imgs_train)
    std = np.std(imgs_train)

    # normalise
    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train /= 255.

    print("-" * 30)
    print("Creating and compiling model...")
    print("-" * 30)

    model = get_unet()

    checkpoint = ModelCheckpoint(filepath='weights.hdf5',
                                 monitor='val_loss',
                                 save_best_only=True)

    print("-" * 30)
    print("Fitting Model")
    print("-" * 30)

    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=32,
              epochs=20,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[checkpoint])

    print("-" * 30)
    print("Loading and preprocessing test data ...")
    print("-" * 30)

    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print("-" * 30)
    print("Loading saved weights ...")
    print("-" * 30)
    model.load_weights('weights.hdf5')

    print("-" * 30)
    print("Predicting on test data ...")
    print("-" * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)

    print("-" * 30)
    print("Saving predicted masks to files ...")
    print("-" * 30)

    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)

    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
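        # suppress warnings raised by imsave (e.g. scikit-image's
        # "low contrast image" warning on near-empty masks)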
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
Example #28
0
unet.add(concatenated_de_convolution_layer(64))
unet.add(convolution_layer(64))
unet.add(convolution_layer(64))

unet.add(concatenated_de_convolution_layer(32))
unet.add(convolution_layer(32))
unet.add(convolution_layer(32))

unet.add(convolution_layer(1, kernel=(1, 1), activation='sigmoid'))

unet.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-5),
             loss=dice_coefficient_loss,
             metrics=[dice_coefficient])

x_train, y_train_mask = load_train_data()

x_train = preprocess(x_train)
y_train_mask = preprocess(y_train_mask)

x_train = x_train.astype('float32')
mean = np.mean(x_train)
std = np.std(x_train)

x_train -= mean
x_train /= std

y_train_mask = y_train_mask.astype('float32')
y_train_mask /= 255.

unet.fit(x_train,
         y_train_mask,
         batch_size=32,       # the original snippet is truncated here;
         epochs=20,           # these fit arguments are assumed for illustration
         verbose=1,
         shuffle=True,
         validation_split=0.2)
Example #29
0
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model.summary()

    model_checkpoint = ModelCheckpoint('unet.hdf5',
                                       monitor='val_loss',
                                       save_best_only=True)
    history = LossAccHistory()
    callbacks_list = [history, model_checkpoint]

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)

    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=16,
              epochs=30,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=callbacks_list)
    #model.save_weights('unet.hdf5', overwrite = True)

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('unet.hdf5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
Example #30
0
def train_and_predict():
    import numpy as np
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('weights.h5',
                                       monitor='val_loss',
                                       save_best_only=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=32,
              nb_epoch=20,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.h5')

    # import matplotlib.pyplot as plt
    '''
    loss_x = list(map(int, np.linspace(1, len(train_loss_list), len(train_loss_list))))
    axes = plt.gca()
    # axes.set_ylim([min(train_loss_list+test_loss_list)[0], max(train_loss_list+test_loss_list)[0]])
    axes.set_ylabel('Loss')
    axes.set_xlabel('epoch')
    plt.plot(loss_x, train_loss_list, color='blue', marker='.', linestyle='-', linewidth=2, markersize=12,
             label='train')
    plt.plot(loss_x, test_loss_list, color='red', marker='.', linestyle='-', linewidth=2, markersize=12, label='test')
    plt.legend(loc='upper right')
    fig1 = plt.gcf()
    plt.show()
    plt.draw()
    fig1.savefig('./loss.png')
    '''
    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)

    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        # imwrite(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)

    image_path = "./raw/test"
    for image_name in os.listdir(image_path):
        # for i in range(96):
        #for j in range(96):
        # if image[i][j] == 255:
        #    mask[i, j, 2] = 230
        # imsave(os.path.join(image_path,  '_mask.png'), img[0])
        imgid = image_name.split(".")[0]
        cv2.imwrite("./raw/predict/" + str(imgid) + "_mask.png", image)
Example #31
0
def runner():
    pro_dir = os.path.join(DATA_DIR, 'pro_sg')

    unique_sid = list()
    with open(os.path.join(pro_dir, 'unique_sid.txt'), 'r') as f:
        for line in f:
            unique_sid.append(line.strip())

    n_items = len(unique_sid)
    train_data = load_train_data(os.path.join(pro_dir, 'train.csv'), n_items)
    vad_data_tr, vad_data_te = load_tr_te_data(
        os.path.join(pro_dir, 'validation_tr.csv'),
        os.path.join(pro_dir, 'validation_te.csv'), n_items)

    # Set hyperparameters
    N = train_data.shape[0]
    idxlist = range(N)

    # training batch size
    batch_size = 200
    batches_per_epoch = int(np.ceil(float(N) / batch_size))

    N_vad = vad_data_tr.shape[0]
    idxlist_vad = range(N_vad)

    # validation batch size (since the entire validation set might not fit into GPU memory)
    batch_size_vad = 1000

    # the total number of gradient updates for annealing
    total_anneal_steps = 200000
    # largest annealing parameter
    anneal_cap = 0.2

    n_epochs = 200

    #
    # train a Multi-VAE^{PR}

    # The generative function is a [200 -> 600 -> n_items] MLP, which means the inference
    # function is a [n_items -> 600 -> 200] MLP. Thus the overall architecture for
    # the Multi-VAE^{PR} is [n_items -> 600 -> 200 -> 600 -> n_items].
    p_dims = [200, 600, n_items]

    tf.reset_default_graph()
    vae = MultiVAE(p_dims, lam=0.0, random_seed=98765)

    saver, logits_var, loss_var, train_op_var, merged_var = vae.build_graph()

    ndcg_var = tf.Variable(0.0)
    ndcg_dist_var = tf.placeholder(dtype=tf.float64, shape=None)
    ndcg_summary = tf.summary.scalar('ndcg_at_k_validation', ndcg_var)
    ndcg_dist_summary = tf.summary.histogram('ndcg_at_k_hist_validation',
                                             ndcg_dist_var)
    merged_valid = tf.summary.merge([ndcg_summary, ndcg_dist_summary])

    arch_str = "I-%s-I" % ('-'.join([str(d) for d in vae.dims[1:-1]]))

    log_dir = '/data/projects/vae_cf/ml-20m/log/VAE_anneal{}K_cap{:1.1E}/{}'.format(
        total_anneal_steps / 1000, anneal_cap, arch_str)

    if os.path.exists(log_dir):
        shutil.rmtree(log_dir)

    print("log directory: %s" % log_dir)
    summary_writer = tf.summary.FileWriter(log_dir,
                                           graph=tf.get_default_graph())

    chkpt_dir = '/data/projects/vae_cf/ml-20m/chkpt/VAE_anneal{}K_cap{:1.1E}/{}'.format(
        total_anneal_steps / 1000, anneal_cap, arch_str)
    if not os.path.isdir(chkpt_dir): os.makedirs(chkpt_dir)
    print("chkpt directory: %s" % chkpt_dir)

    idxlist = np.array(idxlist)
    ndcgs_vad = []
    with tf.Session() as sess:

        init = tf.global_variables_initializer()
        sess.run(init)

        best_ndcg = -np.inf

        update_count = 0.0

        for epoch in tqdm(range(n_epochs), total=n_epochs):
            np.random.shuffle(idxlist)
            # train for one epoch
            for bnum, st_idx in enumerate(range(0, N, batch_size)):
                end_idx = min(st_idx + batch_size, N)
                X = train_data[idxlist[st_idx:end_idx]]

                if sparse.isspmatrix(X):
                    X = X.toarray()
                X = X.astype('float32')

                if total_anneal_steps > 0:
                    anneal = min(anneal_cap,
                                 1. * update_count / total_anneal_steps)
                else:
                    anneal = anneal_cap

                feed_dict = {
                    vae.input_ph: X,
                    vae.keep_prob_ph: 0.5,
                    vae.anneal_ph: anneal,
                    vae.is_training_ph: 1
                }
                sess.run(train_op_var, feed_dict=feed_dict)

                if bnum % 100 == 0:
                    summary_train = sess.run(merged_var, feed_dict=feed_dict)
                    summary_writer.add_summary(
                        summary_train,
                        global_step=epoch * batches_per_epoch + bnum)

                update_count += 1

            # compute validation NDCG
            ndcg_dist = []
            for bnum, st_idx in enumerate(range(0, N_vad, batch_size_vad)):
                end_idx = min(st_idx + batch_size_vad, N_vad)
                X = vad_data_tr[idxlist_vad[st_idx:end_idx]]

                if sparse.isspmatrix(X):
                    X = X.toarray()
                X = X.astype('float32')

                pred_val = sess.run(logits_var, feed_dict={vae.input_ph: X})
                # exclude examples from training and validation (if any)
                pred_val[X.nonzero()] = -np.inf
                ndcg_dist.append(
                    NDCG_binary_at_k_batch(
                        pred_val, vad_data_te[idxlist_vad[st_idx:end_idx]]))

            ndcg_dist = np.concatenate(ndcg_dist)
            ndcg_ = ndcg_dist.mean()
            ndcgs_vad.append(ndcg_)
            merged_valid_val = sess.run(merged_valid,
                                        feed_dict={
                                            ndcg_var: ndcg_,
                                            ndcg_dist_var: ndcg_dist
                                        })
            summary_writer.add_summary(merged_valid_val, epoch)

            # update the best model (if necessary)
            if ndcg_ > best_ndcg:
                saver.save(sess, '{}/model'.format(chkpt_dir))
                best_ndcg = ndcg_

    # test model afterwards:
    test_data_tr, test_data_te = load_tr_te_data(
        os.path.join(pro_dir, 'test_tr.csv'),
        os.path.join(pro_dir, 'test_te.csv'), n_items)
    N_test = test_data_tr.shape[0]
    idxlist_test = range(N_test)

    batch_size_test = 2000
    tf.reset_default_graph()
    vae = MultiVAE(p_dims, lam=0.0)
    saver, logits_var, _, _, _ = vae.build_graph()

    chkpt_dir = '/data/projects/vae_cf/ml-20m/chkpt/VAE_anneal{}K_cap{:1.1E}/{}'.format(
        total_anneal_steps / 1000, anneal_cap, arch_str)
    print("chkpt directory: %s" % chkpt_dir)

    n100_list, r20_list, r50_list = [], [], []

    with tf.Session() as sess:
        saver.restore(sess, '{}/model'.format(chkpt_dir))

        for bnum, st_idx in enumerate(range(0, N_test, batch_size_test)):
            end_idx = min(st_idx + batch_size_test, N_test)
            X = test_data_tr[idxlist_test[st_idx:end_idx]]

            if sparse.isspmatrix(X):
                X = X.toarray()
            X = X.astype('float32')

            pred_val = sess.run(logits_var, feed_dict={vae.input_ph: X})
            # exclude examples from training and validation (if any)
            pred_val[X.nonzero()] = -np.inf
            n100_list.append(
                NDCG_binary_at_k_batch(
                    pred_val,
                    test_data_te[idxlist_test[st_idx:end_idx]],
                    k=100))
            r20_list.append(
                Recall_at_k_batch(pred_val,
                                  test_data_te[idxlist_test[st_idx:end_idx]],
                                  k=20))
            r50_list.append(
                Recall_at_k_batch(pred_val,
                                  test_data_te[idxlist_test[st_idx:end_idx]],
                                  k=50))

    n100_list = np.concatenate(n100_list)
    r20_list = np.concatenate(r20_list)
    r50_list = np.concatenate(r50_list)

    print("Test NDCG@100=%.5f (%.5f)" %
          (np.mean(n100_list), np.std(n100_list) / np.sqrt(len(n100_list))))
    print("Test Recall@20=%.5f (%.5f)" %
          (np.mean(r20_list), np.std(r20_list) / np.sqrt(len(r20_list))))
    print("Test Recall@50=%.5f (%.5f)" %
          (np.mean(r50_list), np.std(r50_list) / np.sqrt(len(r50_list))))
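
# Recall_at_k_batch and NDCG_binary_at_k_batch are used above but not defined in
# this excerpt. A minimal numpy sketch of batched Recall@k (an assumption about
# the metric, not necessarily this project's exact implementation): rows are
# users, columns are items, heldout_batch is a binary, possibly sparse, matrix.
import numpy as np
from scipy import sparse

def Recall_at_k_batch(X_pred, heldout_batch, k=100):
    batch_users = X_pred.shape[0]
    # indices of the k highest-scoring items per user (order within top-k irrelevant)
    idx = np.argpartition(-X_pred, k, axis=1)[:, :k]
    X_pred_binary = np.zeros_like(X_pred, dtype=bool)
    X_pred_binary[np.arange(batch_users)[:, np.newaxis], idx] = True
    X_true_binary = heldout_batch > 0
    if sparse.isspmatrix(X_true_binary):
        X_true_binary = X_true_binary.toarray()
    hits = np.logical_and(X_true_binary, X_pred_binary).sum(axis=1).astype(np.float32)
    # normalize by the best achievable number of hits for each user
    return hits / np.minimum(k, X_true_binary.sum(axis=1))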
Example #32
0
columns = [
    'target', 'ps_car_13', 'ps_reg_03', 'ps_car_14', 'ps_car_11_cat',
    'ps_car_12', 'ps_car_01_cat', 'ps_ind_05_cat', 'ps_reg_02',
    'ps_ind_06_bin', 'ps_ind_07_bin', 'ps_car_04_cat', 'ps_ind_03',
    'ps_car_15', 'ps_car_06_cat', 'ps_ind_17_bin', 'ps_car_07_cat',
    'ps_car_03_cat', 'ps_car_02_cat', 'ps_reg_01', 'ps_ind_16_bin',
    'ps_car_09_cat', 'ps_ind_15', 'ps_car_05_cat', 'ps_car_11',
    'ps_car_08_cat', 'ps_ind_01', 'ps_ind_04_cat', 'ft_pca'
]

#
# Loading data...
#
print("Loading data...")
train_x, train_y, train_id = data.load_train_data(columns)

print('Training...')
dae = model.DualAutoencoder(len(columns) - 1, [40, 40, 40, 40])
dae.optimize(train_x, steps=10000, batch_size=10000)

feature_columns = [
    'ft_dae_' + ('0' + str(i) if i < 10 else str(i)) for i in range(1, 81)
]

print('Predicting train...')
predict = pd.DataFrame(dae.predict(train_x), columns=feature_columns)
predict['id'] = pd.DataFrame(train_id, columns=['id'])

print('Exporting train...')
predict.to_csv('./output/ft_dae_vars2_train.csv', sep=',', index=False)
Example #33
0
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('weights.h5',
                                       monitor='val_loss',
                                       save_best_only=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=32,
              nb_epoch=20,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.h5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)

    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)
    for image, image_id in zip(imgs_mask_test, imgs_id_test):
        image = (image[:, :, 0] * 255.).astype(np.uint8)
        imsave(os.path.join(pred_dir, str(image_id) + '_pred.png'), image)
Example #34
0
def train_and_predict():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)

    imgs_train, coeffs_train = load_train_data()
    masked = np.ma.masked_values(coeffs_train[:,0],0.0)
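    # ~masked.mask below keeps only the samples whose first coefficient is nonzero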

    
    imgs_train = imgs_train[~masked.mask,...]
    coeffs_train = coeffs_train[~masked.mask,...]

    imgs_train = preprocess(imgs_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std
    coeffs_train = coeffs_train[:,0:2]
    coeffs_train = coeffs_train.astype('float32')

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = resnet(img_rows,img_cols)
    model.compile(optimizer=Adam(lr=1e-5), loss='mean_squared_error')

    model_checkpoint = ModelCheckpoint('resnet.hdf5', monitor='loss', save_best_only=True)

    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    #print (coeffs_train)
    prog = ProgbarLogger()
    model.fit(imgs_train, coeffs_train, batch_size=64, nb_epoch=20, verbose=1, shuffle=True,
              callbacks=[prog,model_checkpoint],validation_split = 0.2)

    coeffs_train = model.predict(imgs_train, verbose=1)
    np.save('coeffs_train_predicted.npy',coeffs_train)

    print('-'*30)
    print('Loading and preprocessing valid data...')
    print('-'*30)
    imgs_valid, coeffs_valid = load_valid_data()
    imgs_valid = preprocess(imgs_valid)

    imgs_valid = imgs_valid.astype('float32')
    imgs_valid -= mean
    imgs_valid /= std

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('resnet.hdf5')

    print('-'*30)
    print('Predicting  on valid data...')
    print('-'*30)
    coeffs_valid_predicted = model.predict(imgs_valid, verbose=1)
    np.save('coeffs_valid_predicted.npy', coeffs_valid_predicted)

    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('resnet.hdf5')

    print('-'*30)
    print('Predicting  on test data...')
    print('-'*30)
    coeffs_test = model.predict(imgs_test, verbose=1)
    np.save('coeffs_test.npy', coeffs_test)

    dif = coeffs_valid[:, 0:2] - coeffs_valid_predicted
    print(dif.shape)
    dif = dif[coeffs_valid[:, 0] > 0, :]
    print(dif.shape)
    check = np.mean(np.sum(dif * dif, axis=1))
    print('check', check)
Example #35
0
        max_epochs=150,
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                           epochs=[50, 100],
                           rates=[2e-3, 2e-4])
        ],
        regularization_rate=1e-5,
        batch_iterator_train=BatchIterator(batch_size=128),
        objective_loss_function=loss,
        y_tensor_type=y_tensor_type)

    return net0


np.random.seed(1)
X, y, encoder, scaler = data.load_train_data('data/train.csv')
X_test, ids = data.load_test_data('data/test.csv', scaler)
num_classes = len(encoder.classes_)
num_features = X.shape[1]

skf = cv.StratifiedKFold(y, 5)
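# note: pre-0.18 scikit-learn cross_validation API; newer releases use
# model_selection.StratifiedKFold(n_splits=5).split(X, y)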
train, val = next(iter(skf))

X_train, y_train = X[train], y[train]
X_val, y_val = X[val], y[val]
net1 = build_net()
net1.fit(X_train, y_train)
predicted_val = np.array(net1.predict_proba(X_val))
loss1 = log_loss(y[val], predicted_val)
print "loss1", loss1
Example #36
0
def main():
    # import time
    # start = time.time()
    # model = resnet()
    # duration = time.time() - start
    # print("{} s to make model".format(duration))

    # start = time.time()
    # model.output
    # duration = time.time() - start
    # print("{} s to get output".format(duration))

    # start = time.time()
    # model.compile(loss="categorical_crossentropy", optimizer="sgd")
    # duration = time.time() - start
    # print("{} s to get compile".format(duration))

    # current_dir = os.path.dirname(os.path.realpath(__file__))
    # model_path = os.path.join(current_dir, "resnet_50.png")
    # plot(model, to_file=model_path, show_shapes=True)
    # exit()
# -----------------------------------------------------------------------------
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train, rows,cols)
    imgs_mask_train = preprocess(imgs_mask_train, rows/2,cols/2)

    imgs_train = imgs_train.astype('float32')
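    # unlike most snippets here, the mean below is computed per pixel position
    # (over axis 0) rather than as a single scalar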
    mean = imgs_train.mean(0)[np.newaxis,:]  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = resnet()
    # model.load_weights('resnet.hdf5')
    
    model_checkpoint = ModelCheckpoint('resnet.hdf5', monitor='loss',verbose=1, save_best_only=True)
# ----------------------------------------------------------------------- 
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    model.fit(imgs_train, imgs_mask_train, batch_size=32, nb_epoch=20, verbose=1, shuffle=True, callbacks=[model_checkpoint])
    # for i in range(3):
    #     model.train(imgs_train[:3],imgs_mask_train[:3])

    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test, rows,cols)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('resnet.hdf5')

    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)

    print('-'*30)
    print('Predicting masks on train data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_train, verbose=1)
    np.save('imgs_train_pred.npy', imgs_mask_test)
Example #37
0
def train_and_predict():
    stats = {}
    pp_head(config)
    open('config.txt', 'w').write(str(config))
    
    if True or config['FIT']:
        pp_head('Loading and preprocessing train data...')
        imgs_train, imgs_mask_train = load_train_data()
    
        imgs_train = preprocess(imgs_train)
        imgs_mask_train = preprocess(imgs_mask_train)
    
        imgs_train = imgs_train.astype('float32')
        stats['mean'] = np.mean(imgs_train)  # mean for data centering
        stats['std'] = np.std(imgs_train)  # std for data normalization
    
        imgs_train -= stats['mean']
        imgs_train /= stats['std']
        
        imgs_mask_train = imgs_mask_train.astype('float32')
        imgs_mask_train /= 255.  # scale masks to [0, 1]
        
        open('stats.txt', 'w').write(str(stats))
    else:
        stats = eval(open('stats.txt', 'r').read()) # Read previously saved values from a file, needed to transform test images

    pp_head('Creating and compiling model...')
    if config['LOAD_MODEL']:
        model = model_from_json(open('my_model_architecture.json').read())
    else:
        model = get_unet()
        json_string = model.to_json()
        open('my_model_architecture.json', 'w').write(json_string)
    
    if config['LOAD_LAST']:
        model.load_weights('unet.hdf5')

    if config['FIT']:
        pp_head('Fitting model...')
        model_checkpoint = ModelCheckpoint('unet.hdf5', monitor='loss', save_best_only=True)
        model_checkpoint2 = ModelCheckpoint("weights.{epoch:02d}-{loss:.2f}.hdf5", monitor='loss', save_best_only=True)
        model.fit(imgs_train, imgs_mask_train,validation_split=config['VALIDATION'], batch_size=config['BATCH'], nb_epoch=config['EPOCH'], verbose=1, shuffle=True,
                  callbacks=[model_checkpoint,model_checkpoint2]) # batch size originally 32
    #else:
    #    model.test_on_batch(imgs_train, imgs_mask_train)

    pp_head(str(model.summary()))

    pp_head('Loading and preprocessing test data...')
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test, True)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= stats['mean']
    imgs_test /= stats['std']

    pp_head('Loading saved weights...')
    model.load_weights('unet.hdf5')

    pp_head('Predicting masks on test data...')
    imgs_mask_test = model.predict(imgs_test, verbose=1)  # use batching to speed up on large pictures
    # imgs_mask_test = model.predict(imgs_test, 1, verbose=1)
    
    np.save('imgs_mask_test.npy', imgs_mask_test)
Example #38
0
def train_and_predict():

    print('-' * 30)
    print('train info...')
    print('date = {0}'.format(datetime.now().strftime('%m/%d/%y %H:%M:%S')))
    print('input h,w,c = {0},{1},{2}'.format(img_rows, img_cols, img_channels))
    print('weights = {0}'.format(weights))
    print('train size = {0}*{1}'.format(train_group_size,
                                        train_size / train_group_size))
    print('nb epoch = {0}'.format(nb_epoch))
    print('batch size = {0}'.format(batch_size))
    print('test size = {0}'.format(test_size))
    print('-' * 30)

    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    train_images, train_masks = load_train_data()
    train_images = preprocess(train_images, train_size)
    train_masks = preprocess(train_masks, train_size)
    train_images = train_images.astype('float32')
    mean = np.mean(train_images)  # mean for data centering
    std = np.std(train_images)  # std for data normalization
    train_images -= mean
    train_images /= std
    train_masks = train_masks.astype('float32')
    train_masks /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights(
        weights)  # comment this line out if no pretrained weights are available
    model_checkpoint = ModelCheckpoint(weights,
                                       monitor='val_loss',
                                       save_best_only=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(train_images,
              train_masks,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    test_images, test_image_ids = load_test_data()
    test_images = preprocess(test_images, test_size)
    test_images = test_images.astype('float32')
    test_images -= mean
    test_images /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights(weights)

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    test_masks = model.predict(test_images, verbose=1)

    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    save_test_results(test_masks, test_image_ids)
Example #39
0
def process_data():
    imgs_train, imgs_mask_train = load_train_data()
    
    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)
    
    

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    
    imgs_train -= mean
    imgs_train /= std
    
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    
    y_bin = np.array([mask_not_blank(mask) for mask in imgs_mask_train ])
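    # mask_not_blank is not defined in this excerpt; a minimal sketch (an
    # assumption) would be: lambda mask: np.sum(mask) > 0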
    

    skf = StratifiedKFold(y_bin, n_folds=10, shuffle=True, random_state=seed)
    for ind_tr, ind_te in skf:
        X_train = imgs_train[ind_tr]
        X_test = imgs_train[ind_te]
        y_train = imgs_mask_train[ind_tr]
        y_test = imgs_mask_train[ind_te]
        break
    
    
    X_train_rotate = get_rotation(X_train)
    y_train_rotate  = get_rotation(y_train)
    X_train = np.concatenate((X_train,X_train_rotate),axis=0)
    y_train = np.concatenate((y_train,y_train_rotate),axis=0)
    print(X_train.shape,y_train.shape)
    
    X_train_rotate = get_rotation(X_train,degree=22.5)
    y_train_rotate  = get_rotation(y_train,degree=22.5)
    X_train = np.concatenate((X_train,X_train_rotate),axis=0)
    y_train = np.concatenate((y_train,y_train_rotate),axis=0)
    print(X_train.shape,y_train.shape)
    
    
    # X_train_rotate = get_rotation(X_train,degree=11.25)
    # y_train_rotate  = get_rotation(y_train,degree=11.25)
    # X_train = np.concatenate((X_train,X_train_rotate),axis=0)
    # y_train = np.concatenate((y_train,y_train_rotate),axis=0)
    # print(X_train.shape,y_train.shape)
    
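    # layout here is channels-first (N, C, H, W): flipping axis 3 mirrors the
    # width (horizontal flip); the block after it flips axis 2 (vertical flip)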
    X_train_flip = X_train[:,:,:,::-1]
    y_train_flip = y_train[:,:,:,::-1]
    X_train = np.concatenate((X_train,X_train_flip),axis=0)
    y_train = np.concatenate((y_train,y_train_flip),axis=0)
    
    
    X_train_flip = X_train[:,:,::-1,:]
    y_train_flip = y_train[:,:,::-1,:]
    X_train = np.concatenate((X_train,X_train_flip),axis=0)
    y_train = np.concatenate((y_train,y_train_flip),axis=0)
    
    print(X_train.shape,y_train.shape)

    imgs_train = X_train
    imgs_valid = X_test
    imgs_mask_train = y_train
    imgs_mask_valid = y_test
    
    imgs_train = lasagne.utils.floatX(imgs_train)
    imgs_valid = lasagne.utils.floatX(imgs_valid)

    imgs_mask_train = lasagne.utils.floatX(imgs_mask_train)
    imgs_mask_valid = lasagne.utils.floatX(imgs_mask_valid)


    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    
    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std
    imgs_test = lasagne.utils.floatX(imgs_test)


    return imgs_train,imgs_valid,imgs_mask_train,imgs_mask_valid,imgs_test
Example #40
0
    def TrainModel(self, TMentries):
        args = self.arggen(TMentries)
        data_path, mName, epchs, vspl, nCl = (args[0], args[2], int(args[3]),
                                              float(args[4]), int(args[5]))
        if nCl > 1:
            nLab = nCl + 1
        else:
            nLab = nCl
        print('-' * 30)
        print('Loading and preprocessing train data...')
        print('-' * 30)
        imgs_train, imgs_mask_train = load_train_data(data_path, train_npy,
                                                      labels_npy)
        img_rows, img_cols = imgs_train[0].shape[0], imgs_train[0].shape[1]

        imgs_train = preprocess(imgs_train, img_rows, img_cols)
        imgs_mask_train = preprocess(imgs_mask_train, img_rows, img_cols)
        imgs_train = imgs_train.astype('float32')
        mean = np.mean(imgs_train)  # mean for data centering
        std = np.std(imgs_train)  # std for data normalization

        imgs_train -= mean
        imgs_train /= std

        #        imgs_mask_train = imgs_mask_train.astype('float32')
        #        imgs_mask_train /= 255.  # scale masks to [0, 1]
        #        imgs_mask_train *= (nCl)  # generates the labes as integers
        imgs_mask_train = imgs_mask_train.astype('uint8')
        if np.max(imgs_mask_train) != nCl:
            print(
                'Warning: the number of classes does not match the intensities of the label images'
            )
        if nLab > 1:
            imgs_mask_train = getSegmentationArr(imgs_mask_train, nLab)
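            # getSegmentationArr (not shown here) presumably one-hot encodes the
            # integer label masks into nLab per-pixel class channels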
#            global imgs_mask_train2
#            imgs_mask_train2 = np.copy(imgs_mask_train)
        else:
            imgs_mask_train[imgs_mask_train > 0.5] = 1
            imgs_mask_train[imgs_mask_train <= 0.5] = 0
        print(imgs_mask_train.shape)
        print('-' * 30)
        print('Creating and compiling model...')
        print('-' * 30)

        #    model = get_unet(imgs_train[0].shape)
        #    model = Model4(imgs_train[0].shape)
        if os.path.exists(os.path.join(data_path, model_p)) and len([
                x for x in os.listdir(os.path.join(data_path, model_p))
                if ('.hdf5') in x
        ]) > 0:
            print('loading weights and compiling the model')
            latest = max(glob.glob(os.path.join(data_path, model_p, '*.hdf5')),
                         key=os.path.getctime)
            model = getattr(Nmodels, mName)(nLab, imgs_train[0].shape, latest)
        else:
            if not os.path.exists(os.path.join(data_path, model_p)):
                os.makedirs(os.path.join(data_path, model_p))
            model = getattr(Nmodels, mName)(nLab, imgs_train[0].shape, '')

    #    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
        model_checkpoint = ModelCheckpoint(os.path.join(
            data_path, model_p,
            'weights.ep{epoch:02d}-il{loss:.3f}-vl{val_loss:.3f}.hdf5'),
                                           monitor='loss',
                                           verbose=1,
                                           save_best_only=True)

        print('-' * 30)
        print('Fitting model...')
        print('-' * 30)
        #    model.fit(imgs_train, imgs_mask_train, batch_size=34, nb_epoch=20, verbose=1, shuffle=True,
        #              validation_split=0.2,
        #              callbacks=[model_checkpoint])
        model.fit(imgs_train,
                  imgs_mask_train,
                  batch_size=10,
                  epochs=epchs,
                  verbose=1,
                  validation_split=vspl,
                  shuffle=True,
                  callbacks=[model_checkpoint])
        return
Example #41
0
File: train_sq.py Project: mgood2/kaggle

def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)
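
# dice_coef itself is not shown in this excerpt; a minimal sketch of the usual
# smoothed Dice coefficient on Keras backend tensors (an assumption, not
# necessarily this project's exact definition):
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)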

def preprocess(imgs):
    imgs_p = np.ndarray((imgs.shape[0], imgs.shape[1], img_rows, img_cols), dtype=np.uint8)
    for i in range(imgs.shape[0]):
        imgs_p[i, 0] = cv2.resize(imgs[i, 0], (img_cols, img_rows), interpolation=cv2.INTER_CUBIC)
    return imgs_p


print('-'*30)
print('Loading and preprocessing train data...')
print('-'*30)
imgs_train, imgs_mask_train = load_train_data()

imgs_train = preprocess(imgs_train)
imgs_mask_train = preprocess(imgs_mask_train)

imgs_train = imgs_train.astype('float32')
mean = np.mean(imgs_train)  # mean for data centering
std = np.std(imgs_train)  # std for data normalization

imgs_train -= mean
imgs_train /= std

imgs_mask_train = imgs_mask_train.astype('float32')
imgs_mask_train /= 255.  # scale masks to [0, 1]

print('-'*30)
Example #42
0
def problem1():
    data = load_train_data()
    graph_p1(solve1(data))
Example #43
0

#preprocessing image size
img_rows = 64
img_cols = 80
smooth = 1.
def preprocess(imgs):
    imgs_p = np.ndarray((imgs.shape[0], imgs.shape[1], img_rows, img_cols), dtype=np.uint8)
    for i in range(imgs.shape[0]):
        imgs_p[i, 0] = cv2.resize(imgs[i, 0], (img_cols, img_rows), interpolation=cv2.INTER_CUBIC)
    return imgs_p

print('-'*30)
print('Loading and preprocessing train data...')
print('-'*30)
imgs, imgs_mask = load_train_data()

imgs = preprocess(imgs)
imgs_mask = preprocess(imgs_mask)

imgs = imgs.astype('float32')
mean = np.mean(imgs)  # mean for data centering
std = np.std(imgs)  # std for data normalization

imgs -= mean
imgs /= std

imgs_mask = imgs_mask.astype('float32')
imgs_mask /= 255.  # scale masks to [0, 1]

Example #44
0
def train_and_predict():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = create_model()

    print('-' * 30)
    print('Building data augmentation object...')
    print('-' * 30)
    datagen = ImageDataGenerator(rotation_range=15,
                                 width_shift_range=0.15,
                                 height_shift_range=0.15,
                                 shear_range=0.15,
                                 horizontal_flip=True,
                                 vertical_flip=True)

    total = imgs_train.shape[0]
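    # `stack` comes from outside this excerpt and sets how many augmented
    # samples are drawn per original image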
    img = []
    count = 0
    for batch in datagen.flow(imgs_train, batch_size=1, seed=1337):
        img.append(batch)
        count += 1
        if count > total * stack:
            break
    imgs_train = np.array(img)[:, 0]

    mask = []
    count = 0
    for batch in datagen.flow(imgs_mask_train, batch_size=1, seed=1337):
        mask.append(batch)
        count += 1
        if count > total * stack:
            break
    imgs_mask_train = np.array(mask)[:, 0]

    callbacks = [
        EarlyStopping(monitor='loss', patience=5, verbose=0),
        ModelCheckpoint('weights.hdf5', monitor='loss', save_best_only=True)
    ]

    print('-' * 30)
    print('Begin training...')
    print('-' * 30)
    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=4,
              nb_epoch=100,
              verbose=1,
              shuffle=True,
              callbacks=callbacks)

    print('-' * 30)
    print('Loading and preprocessing test data...')
    print('-' * 30)
    imgs_test = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-' * 30)
    print('Loading saved weights...')
    print('-' * 30)
    model.load_weights('weights.hdf5')

    print('-' * 30)
    print('Predicting masks on test data...')
    print('-' * 30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
Example #45
0
def train_and_predict():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()
    
    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)
    
    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization
    
    imgs_train -= mean
    imgs_train /= std
    
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    
    y_bin = np.array([mask_not_blank(mask) for mask in imgs_mask_train ])

    # X_train,X_test,y_train,y_test = train_test_split(imgs_train,imgs_mask_train,test_size=0.2,random_state=seed)
    
    skf = StratifiedKFold(y_bin, n_folds=10, shuffle=True, random_state=seed)
    for ind_tr, ind_te in skf:
        X_train = imgs_train[ind_tr]
        X_test = imgs_train[ind_te]
        y_train = imgs_mask_train[ind_tr]
        y_test = imgs_mask_train[ind_te]
        break

    X_train_flip = X_train[:,:,:,::-1]
    y_train_flip = y_train[:,:,:,::-1]
    X_train = np.concatenate((X_train,X_train_flip),axis=0)
    y_train = np.concatenate((y_train,y_train_flip),axis=0)


    X_train_flip = X_train[:,:,::-1,:]
    y_train_flip = y_train[:,:,::-1,:]
    X_train = np.concatenate((X_train,X_train_flip),axis=0)
    y_train = np.concatenate((y_train,y_train_flip),axis=0)
    
    
    imgs_train = X_train
    imgs_valid = X_test
    imgs_mask_train = y_train
    imgs_mask_valid = y_test
    
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    model_name = 'unet_seed_1024_epoch_20_aug_64_80_shiftbn_sgd_srelu_plus10.hdf5'
    model_checkpoint = ModelCheckpoint('E:\\UltrasoundNerve\\'+model_name, monitor='loss', save_best_only=True)
    plot(model, to_file='E:\\UltrasoundNerve\\%s.png'%model_name.replace('.hdf5',''),show_shapes=True)
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    augmentation=False
    batch_size=128
    nb_epoch=10
    load_model=True
    use_all_data = False
    
    if use_all_data:
        imgs_train = np.concatenate((imgs_train,imgs_valid),axis=0)
        imgs_mask_train = np.concatenate((imgs_mask_train,imgs_mask_valid),axis=0)
    
    if load_model:
        model.load_weights('E:\\UltrasoundNerve\\'+model_name)
    if not augmentation:
        # model.fit(imgs_train, imgs_mask_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=True,
        #           callbacks=[model_checkpoint],
        #           validation_data=[imgs_valid,imgs_mask_valid]
        #           )
        pass
    else:
        
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=45,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.0,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.0, # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images
        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(imgs_train)
        # fit the model on the batches generated by datagen.flow()
        model.fit_generator(datagen.flow(imgs_train, imgs_mask_train,
                            batch_size=batch_size),
                            samples_per_epoch=imgs_train.shape[0],
                            nb_epoch=nb_epoch,
                            callbacks=[model_checkpoint],
                            validation_data=(imgs_valid,imgs_mask_valid))    
    
    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_id_test = load_test_data()
    imgs_test = preprocess(imgs_test)
    
    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('E:\\UltrasoundNerve\\'+model_name)

    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
Example #46
0
from __future__ import print_function

from keras.callbacks import ModelCheckpoint

from data import load_train_data, get_data_mean
from utils import *

create_paths()
log_file = open(global_path + "logs/log_file.txt", 'a')

# CEAL data definition
X_train, y_train = load_train_data()
labeled_index = np.arange(0, nb_labeled)
unlabeled_index = np.arange(nb_labeled, len(X_train))

# (1) Initialize model
model = get_unet(dropout=True)
# mean_data, std_data = get_data_mean()
# model.load_weights(initial_weights_path)
model.load_weights(global_path + "models/active_model10.h5")
print('input shape', X_train.shape)

# nb_labeled = 2640
test_num = 10

out = model.predict(X_train[nb_labeled:nb_labeled + test_num])
# x_train = (X_train*255).astype(np.uint8).transpose([0,2,3,1])
# print(x_train.shape)

import numpy as np
#print(np.unique(out))
Example #47
0
File: otto-B.py Project: sronen71/otto
        eval_size=0.0,
        verbose=1,
        max_epochs=150, 
        on_epoch_finished=[
            AdjustVariable('update_learning_rate',
                epochs=[50,100],rates=[2e-3,2e-4])],
        regularization_rate=1e-5,
        batch_iterator_train=BatchIterator(batch_size=128)
        )

    return net0



np.random.seed(1)
X, y, encoder, scaler, ids_val = data.load_train_data('data/train.csv')
X_test, ids = data.load_test_data('data/test.csv', scaler)
num_classes = len(encoder.classes_)
num_features = X.shape[1]


scores = []
p = None

folds = 5
eps = 1e-4
delta = 1
num = 0
scores = np.zeros((y.shape[0], 9))
prev_loss = 10
while delta > eps:
Example #48
0
def train_and_predict():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_mask_train = load_train_data()

    imgs_train = preprocess(imgs_train)
    imgs_mask_train = preprocess(imgs_mask_train)

    imgs_train = imgs_train.astype('float32')
    mean = np.mean(imgs_train)  # mean for data centering
    std = np.std(imgs_train)  # std for data normalization

    imgs_train -= mean
    imgs_train /= std
    #Normalization of the train set

    imgs_mask_train = imgs_mask_train.astype('float32')

    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)
    model = get_unet()
    model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)
    #Saving the weights and the loss of the best predictions we obtained

    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    history = model.fit(imgs_train, imgs_mask_train, batch_size=10, epochs=20, verbose=1, shuffle=True,
              validation_split=0.2,
              callbacks=[model_checkpoint])
    model.save('my_model')

    print('-'*30)
    print('Loading and preprocessing test data...')
    print('-'*30)
    imgs_test, imgs_maskt = load_test_data()
    imgs_test = preprocess(imgs_test)

    imgs_test = imgs_test.astype('float32')
    imgs_test -= mean
    imgs_test /= std
    #Normalization of the test set

    print('-'*30)
    print('Loading saved weights...')
    print('-'*30)
    model.load_weights('weights.h5')

    print('-'*30)
    print('Predicting masks on test data...')
    print('-'*30)
    imgs_mask_test = model.predict(imgs_test, verbose=1)
    np.save('imgs_mask_test.npy', imgs_mask_test)
    print('-' * 30)
    print('Saving predicted masks to files...')
    print('-' * 30)
    pred_dir = 'preds'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)

    for k in range(len(imgs_mask_test)):
        a = rescale_intensity(imgs_test[k][:, :, 0], out_range=(-1, 1))
        b = (imgs_mask_test[k][:, :, 0]).astype('uint8')
        io.imsave(os.path.join(pred_dir, str(k) + '_pred.png'), mark_boundaries(a, b))
    #Saving our predictions in the directory 'preds'
    plt.plot(history.history['dice_coef'])
    plt.plot(history.history['val_dice_coef'])
    plt.title('Model dice coeff')
    plt.ylabel('Dice coeff')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
Example #49
0
    def setUp(self):
        self.train_data = extract_features(load_train_data())
        self.test_data = extract_features(load_test_data())
Example #50
0
def train_and_predict():

    # Horovod: initialize Horovod.
    hvd.init()

    # Horovod: the stock boilerplate pins one GPU per process via
    # hvd.local_rank(); this excerpt only configures CPU thread counts
    config = tf.ConfigProto()
    config.intra_op_parallelism_threads = 10
    config.inter_op_parallelism_threads = 1
    K.set_session(tf.Session(config=config))

    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_mask_train = load_train_data()
    imgs_mask_train = imgs_mask_train[..., np.newaxis]
    #imgs_train = preprocess(imgs_train,'I')
    #imgs_mask_train = preprocess(imgs_mask_train,'M')
    #
    print(imgs_train.shape)
    print(imgs_mask_train.shape)
    imgs_train = imgs_train.astype('float32')
    #mean = np.mean(imgs_train)  # mean for data centering
    #std = np.std(imgs_train)  # std for data normalization

    #imgs_train -= mean
    #imgs_train /= std
    imgs_train /= 255.  # scale images to [0, 1]

    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()

    print('hvd size:', hvd.size())
    print('learning rate:', .001 * hvd.size())
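    # .001 * hvd.size() follows the linear scaling rule: grow the base
    # learning rate with the number of workers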

    callbacks = [
        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        hvd.callbacks.BroadcastGlobalVariablesCallback(0),
    ]
    if hvd.rank() == 0:
        callbacks.append(
            keras.callbacks.ModelCheckpoint('/workspace/checkpoint-{epoch}.h5',
                                            monitor='val_loss',
                                            save_best_only=True))
    #model_checkpoint = ModelCheckpoint('weights.h5', monitor='val_loss', save_best_only=True)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=8,
              epochs=10,
              shuffle=True,
              validation_split=0.01,
              callbacks=callbacks,
              verbose=1 if hvd.rank() == 0 else 0)
    if hvd.rank() == 0:
        model.save('/workspace/unetmodel.h5', include_optimizer=False)