Code Example #1
def train_autoencoder(cross_epoch=0,
                      data_index=None,
                      brain_map=None,
                      cut_shape=None,
                      data_type=['MCIc', 'MCInc'],
                      pre_dir='/home/anzeng/rhb/fmri_data',
                      num_batches=512 * 5 + 1,
                      test_size=6):
    keras.backend.clear_session()
    batch_size = 100
    if cut_shape is None:
        brain_map = [212, 213, 214, 215, 216, 217, 218]
        cut_shape = [100, 0, 100, 0, 100, 0]
        mask = nib.load(
            '/home/anzeng/rhb/fmri/fMRI-deeping-learning/BN_Atlas_246_3mm.nii')
        mask = mask.get_fdata()
        # compute the bounding box of the sMRI region to crop
        for x in brain_map:
            tmp = np.where(mask == x)
            for i in range(3):
                cut_shape[2 * i] = min(cut_shape[2 * i], np.min(tmp[i]))
                cut_shape[2 * i + 1] = max(cut_shape[2 * i + 1],
                                           np.max(tmp[i]))
        print(cut_shape)
    # xyz = 32
    logs_path = os.path.join(cfg.voxnet_checkpoint_dir,
                             'train_' + str(cross_epoch))
    if os.path.isdir(logs_path):
        shutil.rmtree(logs_path)
    os.makedirs(logs_path)
    model = Autoencoder()
    dataset = fMRI_data(data_type,
                        data_index=data_index,
                        dir=pre_dir,
                        batch_mode='random',
                        varbass=cfg.varbass)
    model.fit_generator(
        dataset.get_smri_batch(cut_shape,
                               batch_size,
                               _batch_mode='oversampling',
                               _mode='train'),
        steps_per_epoch=8,
        epochs=num_batches,
        callbacks=[keras.callbacks.TensorBoard(log_dir=logs_path)])

    if not os.path.exists(cfg.voxnet_checkpoint_dir):
        os.makedirs(cfg.voxnet_checkpoint_dir)
    filepath = os.path.join(cfg.voxnet_checkpoint_dir,
                            'train_' + str(cross_epoch) + '.h5')
    # for i in range(10):
    #     test_model = keras.Model(inputs=model.layers[0].input,outputs = model.layers[i].output)
    #     test_model.save(filepath=filepath)
    #     print(i)
    model.save(filepath=filepath)
    # del model
    # model = keras.models.load_model(filepath)
    return filepath
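The Autoencoder class instantiated above is not included in this listing. A minimal sketch of what it might return, assuming 32x32x32 single-channel sMRI crops, a compiled reconstruction model (fit_generator is called on it directly), and a 100-unit bottleneck named 'feature' (Code Example #10 extracts a layer by that name and feeds 100-dim features to the GRUs):

import keras

def Autoencoder(input_shape=(32, 32, 32, 1), feature_dim=100):
    # Hypothetical sketch. Encoder: two strided 3-D convolutions down to 8^3.
    inputs = keras.layers.Input(shape=input_shape)
    x = keras.layers.Conv3D(16, 3, strides=2, padding='same',
                            activation='relu')(inputs)
    x = keras.layers.Conv3D(32, 3, strides=2, padding='same',
                            activation='relu')(x)
    x = keras.layers.Flatten()(x)
    # Bottleneck: Code Example #10 extracts this layer by name.
    feature = keras.layers.Dense(feature_dim, activation='relu',
                                 name='feature')(x)
    # Decoder: mirror the encoder back to the input resolution.
    x = keras.layers.Dense(8 * 8 * 8 * 32, activation='relu')(feature)
    x = keras.layers.Reshape((8, 8, 8, 32))(x)
    x = keras.layers.Conv3DTranspose(16, 3, strides=2, padding='same',
                                     activation='relu')(x)
    outputs = keras.layers.Conv3DTranspose(1, 3, strides=2, padding='same')(x)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer='adam', loss='mse')
    return model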
Code Example #2
def emsemble(cross_epoch=0,
             data_index=None,
             cut_shape=None,
             data_type=['MCIc', 'MCInc'],
             pre_dir='/home/anzeng/rhb/fmri_data',
             num_batches=256 * 5,
             voxnet_point=None,
             test_size=6,
             brain_map=[217],
             f_handle=None):
    # tf.reset_default_graph()
    keras.backend.clear_session()
    dataset = fMRI_data(data_type, data_index=data_index, varbass=False, dir=pre_dir)
    # xyz = 32
    # input_shape = [None, xyz, xyz, xyz, 1]
    # voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')

    true_shape = []
    for x in range(0, len(cut_shape), 2):
        true_shape.append(cut_shape[x + 1] - cut_shape[x] + 1)
    # with tf.Session() as sess:
    #     sess.run(tf.global_variables_initializer())
    #     voxnet.npz_saver.restore(sess,voxnet_point)
    # load the trained model
    model = keras.models.load_model(voxnet_point)
    print('train_acc')
    train_fmri_evaluation = evaluation()
    train_smri_evaluation = evaluation()
    train_iter = iter(dataset.get_fmri('train')).__next__
    for i in range(100):
        img, label, _ = train_iter()
        predict, y_true = get_label(model, img, label, cut_shape, true_shape)
        predict = np.argmax(predict, axis=1)
        train_smri_evaluation += evaluation(y_predict=predict, y_true=y_true)
        if i % 10 == 0 and i > 0:
            print(train_smri_evaluation)
        y_predict = ensemble_label(predict, 2)
        train_fmri_evaluation += evaluation(y_predict=[y_predict], y_true=[label])
    print(train_fmri_evaluation)
    print('test_acc')
    test_fmri_evaluation = evaluation()
    test_smri_evaluation = evaluation()
    test_iter = iter(dataset.get_fmri('test')).__next__
    for i in range(test_size):
        img, label, filename = test_iter()
        predict, y_true = get_label(model, img, label, cut_shape, true_shape)
        predict = np.argmax(predict, axis=1)
        test_smri_evaluation_one = evaluation(y_predict=predict, y_true=y_true)
        test_smri_evaluation += test_smri_evaluation_one
        print(test_smri_evaluation_one)
        print(test_smri_evaluation)
        y_predict = ensemble_label(predict, 2)
        test_fmri_evaluation += evaluation(y_predict=[y_predict], y_true=[label])
        print(y_predict, label, test_fmri_evaluation)
        # if y_predict != label:
        #     print(filename)
        #     f_handle.write(filename+'\n')
    if f_handle:
        f_handle.write('ensemble train:\n')
        f_handle.write(str(train_fmri_evaluation) + '\n')
        f_handle.write('ensemble test:\n')
        f_handle.write(str(test_fmri_evaluation) + '\n')
    return test_fmri_evaluation
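ensemble_label(predict, 2) collapses per-frame predictions into one subject-level label but is defined elsewhere. A minimal sketch, assuming a plain majority vote over class indices:

import numpy as np

def ensemble_label(predictions, nb_classes):
    # Majority vote: the subject-level label is the class predicted most
    # often across the sampled frames.
    counts = np.bincount(np.asarray(predictions, dtype=np.int64),
                         minlength=nb_classes)
    return int(np.argmax(counts))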
Code Example #3
def svm_emsemble(cross_epoch=0,
                 data_index=None,
                 cut_shape=None,
                 data_type=['MCIc', 'MCInc'],
                 pre_dir='/home/anzeng/rhb/fmri_data',
                 num_batches=256 * 5,
                 voxnet_point=None,
                 test_size=6,
                 brain_map=[217],
                 f_handle=None):
    keras.backend.clear_session()
    dataset = fMRI_data(data_type, data_index=data_index, varbass=False, dir=pre_dir)

    true_shape = []
    for x in range(0, len(cut_shape), 2):
        true_shape.append(cut_shape[x + 1] - cut_shape[x] + 1)
    # load the trained model
    model = keras.models.load_model(voxnet_point)
    layer_name = 'CNN_fc2'
    model = keras.Model(inputs=model.layers[0].input, outputs=model.get_layer(layer_name).output)
    # count training samples across classes
    train_len = 0
    for i in data_type:
        train_len += len(data_index[i]['train'])

    # print('train_acc')
    # train_fmri_evaluation = evaluation()
    # train_smri_evaluation = evaluation()
    train_iter = iter(dataset.get_fmri('train')).__next__
    train_one_len = [0]
    # training features; -1 is a sentinel meaning "not yet initialized"
    train_data = -1
    train_label = -1
    for i in range(train_len):
        img, label, _ = train_iter()
        predict, y_true = get_label(model, img, label, cut_shape, true_shape)
        if isinstance(train_data, int):
            train_data = predict
            train_label = y_true
        else:
            train_data = np.vstack((train_data, predict))
            train_label = np.hstack((train_label, y_true))
        train_one_len.append(train_one_len[-1]+predict.shape[0])
        # train_smri_evaluation += evaluation(y_predict=predict, y_true=y_true)
        # y_predict = ensemble_label(predict, 2)
        # train_fmri_evaluation += evaluation(y_predict=[y_predict], y_true=[label])
    # print(train_fmri_evaluation)
    clf = svm.SVC(C=1.0, kernel='rbf', gamma='auto')
    clf.fit(train_data, train_label)
    train_evaluation = evaluation()
    for i in range(1, len(train_one_len)):
        predict = clf.predict(train_data[train_one_len[i - 1]:train_one_len[i]])
        # print(predict)
        y_predict = ensemble_label(predict, 2)
        y_true = train_label[train_one_len[i - 1]]
        train_evaluation += evaluation(y_predict=[y_predict], y_true=[y_true])
    # predict = clf.predict(train_data)
    # train_evaluation = evaluation(y_true = train_label,y_predict = predict)
    print('svm ensemble train:')
    print(train_evaluation)

    test_evaluation = evaluation()
    # test_smri_evaluation = evaluation()
    print('svm ensemble test')
    test_iter = iter(dataset.get_fmri('test')).__next__
    for i in range(test_size):
        img, label, filename = test_iter()
        predict, y_true = get_label(model, img, label, cut_shape, true_shape)
        # test_smri_evaluation_one = evaluation(y_predict=predict, y_true=y_true)
        # test_smri_evaluation += test_smri_evaluation_one
        # print(test_smri_evaluation_one)
        # print(test_smri_evaluation)
        predict = clf.predict(predict)
        y_predict = ensemble_label(predict, 2)
        test_evaluation += evaluation(y_predict=[y_predict], y_true=[label])
        print(y_predict, label, test_evaluation)
        # if y_predict != label:
        #     print(filename)
        #     f_handle.write(filename+'\n')
    if f_handle:
        f_handle.write('svm ensemble train:\n')
        f_handle.write(str(train_evaluation) + '\n')
        f_handle.write('svm ensemble test:\n')
        f_handle.write(str(test_evaluation) + '\n')
    return test_evaluation
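get_label is likewise external. A hedged sketch of its apparent contract, assuming it crops every time frame to the cut_shape bounding box, runs the Keras model on the stacked frames, and repeats the subject label once per frame (names and axis order here are assumptions):

import numpy as np

def get_label(model, img, label, cut_shape, true_shape):
    # img: (X, Y, Z, T) fMRI volume; cut_shape: [x0, x1, y0, y1, z0, z1].
    x0, x1, y0, y1, z0, z1 = cut_shape
    frames = img[x0:x1 + 1, y0:y1 + 1, z0:z1 + 1, :]
    frames = np.transpose(frames, (3, 0, 1, 2))[..., np.newaxis]
    # each cropped frame should match the per-axis sizes in true_shape
    assert list(frames.shape[1:4]) == list(true_shape)
    predict = model.predict(frames)                 # one row per frame
    y_true = np.full(predict.shape[0], label, dtype=np.int64)
    return predict, y_true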
Code Example #4
def main(data_index=None,
         cut_shape=None,
         data_type=['MCIc', 'MCInc'],
         pre_dir='/home/anzeng/rhb/fmri_data',
         num_batches=256 * 5,
         voxnet_point=None,
         test_size=6,
         brain_map=[217]):
    # fr = open(cfg.output, 'w')
    tf.reset_default_graph()

    time_dim = 80  # number of time slices to sample
    batch_size = 8

    dataset = fMRI_data(data_type,
                        data_index=data_index,
                        varbass=False,
                        dir=pre_dir)
    #SVM index

    #########################
    svm_index = {}
    train_len = 0
    test_len = 0
    for d_type in data_type:
        t_dir = os.path.join(pre_dir, d_type)
        t_len = os.listdir(t_dir)
        t_len = len(t_len)
        train_index = list(range(t_len))
        test_index = data_index[d_type]['test']
        for x in test_index:
            train_index.remove(x)
        _index = {'train': train_index, 'test': test_index}
        train_len += len(train_index)
        test_len += len(test_index)
        svm_index[d_type] = _index
    print(train_len)
    print(test_len)
    print(svm_index)
    svm_dataset = fMRI_data(data_type,
                            data_index=svm_index,
                            varbass=False,
                            dir=pre_dir)
    ##########################
    xyz = 32
    input_shape = [None, xyz, xyz, xyz, 1]
    # for i in range(3):
    #     input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
    # input_shape.append(1)
    # print(input_shape)
    voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
    FCNs = Classifier_FCN(tf.placeholder(tf.float32, [None, time_dim, 50]),
                          nb_classes=2)

    data_value = [[1], [1]]

    # create the data placeholders
    p = dict()  # placeholders

    p['labels'] = tf.placeholder(tf.float32, [None, 2])
    p['data_value'] = tf.placeholder(tf.float32, [2, 1])

    p['Weight'] = tf.matmul(p['labels'], p['data_value'])
    p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(
        logits=FCNs[-2], labels=p['labels'])
    p['Weight'] = tf.reshape(p['Weight'], [-1])
    p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
    p['loss'] = tf.reduce_mean(p['x_loss'])
    p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])

    p['prediction'] = tf.argmax(FCNs[-1], 1)
    p['y_true'] = tf.argmax(p['labels'], 1)
    p['correct_prediction'] = tf.equal(p['prediction'], p['y_true'])
    p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'],
                                           tf.float32))

    p['learning_rate'] = tf.placeholder(tf.float32)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        p['train'] = tf.train.AdamOptimizer(p['learning_rate'],
                                            epsilon=1e-3).minimize(p['loss'])
    p['weights_decay'] = tf.train.GradientDescentOptimizer(
        p['learning_rate']).minimize(p['l2_loss'])

    # p['test_error'] = tf.placeholder(tf.float32)
    # hyperparameter settings

    initial_learning_rate = 0.01
    min_learning_rate = 0.000001
    learning_rate_decay_limit = 0.0001

    num_batches_per_epoch = len(dataset.train) / float(batch_size)
    learning_decay = 10 * num_batches_per_epoch
    weights_decay_after = 5 * num_batches_per_epoch

    checkpoint_num = 0
    learning_step = 0
    min_loss = 1e308

    if voxnet_point:
        cfg.voxnet_checkpoint = voxnet_point

    accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
    if not os.path.isdir(cfg.fcn_checkpoint_dir):
        os.mkdir(cfg.fcn_checkpoint_dir)

    if not os.path.exists(accuracy_filename):
        with open(accuracy_filename, 'a') as f:
            f.write('')
    with open(accuracy_filename, 'a') as f:
        f.write(str(brain_map) + '\n')

    # return value
    test_evaluation = evaluation()
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
        # dummy input for the voxnet placeholder
        input_shape[0] = 1
        voxnet_data = np.ones(input_shape, np.float32)
        input_shape[0] = -1
        for batch_index in range(num_batches):
            start = time.time()
            # learning_rate = max(min_learning_rate,
            #                     initial_learning_rate * 0.5 ** (learning_step / learning_decay))
            learning_rate = 0.0001
            learning_step += 1

            if batch_index > weights_decay_after and batch_index % 256 == 0:
                session.run(p['weights_decay'], feed_dict=feed_dict)

            voxs, labels = dataset.train.oversampling.get_time_batch(
                session,
                voxnet,
                cut_shape,
                time_dim=time_dim,
                batch_size=batch_size)
            feed_dict = {
                FCNs[0]: voxs,
                voxnet[0]: voxnet_data,
                voxnet.keep_prob: 1.0,
                FCNs.keep_prob: 0.7,
                p['labels']: labels,
                p['learning_rate']: learning_rate,
                FCNs.training: True,
                p['data_value']: data_value
            }

            session.run(p['train'], feed_dict=feed_dict)

            if batch_index and batch_index % 32 == 0:

                print("{} batch: {}".format(datetime.datetime.now(),
                                            batch_index))
                print('learning rate: {}'.format(learning_rate))
                # fr.write("{} batch: {}".format(datetime.datetime.now(), batch_index))
                # fr.write('learning rate: {}'.format(learning_rate))

                feed_dict[FCNs.training] = False
                loss = session.run(p['loss'], feed_dict=feed_dict)
                print('loss: {}'.format(loss))

                if (batch_index and loss > 1.5 * min_loss
                        and learning_rate > learning_rate_decay_limit):
                    min_loss = loss
                    learning_step *= 1.2
                    print("decreasing learning rate...")
                min_loss = min(loss, min_loss)

            if batch_index and batch_index % 16 == 0:
                num_accuracy_batches = 20
                train_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.train.random_sampling.get_time_batch(
                        session,
                        voxnet,
                        cut_shape,
                        time_dim=time_dim,
                        batch_size=batch_size)
                    feed_dict = {
                        FCNs[0]: voxs,
                        voxnet[0]: voxnet_data,
                        voxnet.keep_prob: 1.0,
                        FCNs.keep_prob: 1.0,
                        p['labels']: labels,
                        FCNs.training: False
                    }
                    predictions, y_true = session.run(
                        [p['prediction'], p['y_true']], feed_dict=feed_dict)
                    train_evaluation += evaluation(y_true=y_true,
                                                   y_predict=predictions)
                print('training accuracy \n' + str(train_evaluation))
                num_accuracy_batches = test_size
                test_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.test.random_sampling.get_time_batch(
                        session,
                        voxnet,
                        cut_shape,
                        time_dim=time_dim,
                        batch_size=batch_size)
                    feed_dict = {
                        FCNs[0]: voxs,
                        voxnet[0]: voxnet_data,
                        voxnet.keep_prob: 1.0,
                        FCNs.keep_prob: 1.0,
                        p['labels']: labels,
                        FCNs.training: False
                    }
                    predictions, y_true = session.run(
                        [p['prediction'], p['y_true']], feed_dict=feed_dict)
                    test_evaluation += evaluation(y_true=y_true,
                                                  y_predict=predictions)
                    print(test_evaluation)
                print('test accuracy \n' + str(test_evaluation))
                with open(accuracy_filename, 'a') as f:
                    f.write('checkpoint_num:' + str(checkpoint_num) + ':\n')
                    f.write('train:\n' + str(train_evaluation) + '\n')
                    f.write('test:\n' + str(test_evaluation) + '\n')
                if batch_index % 64 == 0 or train_evaluation.ACC >= 0.8:
                    ###### SVM classifier ####################
                    svm_feature = np.zeros((train_len + test_len, 128))
                    svm_label = np.zeros(train_len + test_len)
                    for x in range(train_len):
                        voxs, labels = svm_dataset.train.random_sampling.get_time_batch(
                            session,
                            voxnet,
                            cut_shape,
                            time_dim=time_dim,
                            batch_size=1)
                        feed_dict = {
                            FCNs[0]: voxs,
                            voxnet[0]: voxnet_data,
                            voxnet.keep_prob: 1.0,
                            FCNs.keep_prob: 1.0,
                            p['labels']: labels,
                            FCNs.training: False
                        }
                        feature, y_true = session.run(
                            [FCNs['gap'], p['y_true']], feed_dict=feed_dict)
                        feature = np.reshape(feature, [1, 128])
                        svm_feature[x] = feature
                        # print(svm_feature[x])
                        svm_label[x] = y_true
                    for x in range(test_len):
                        voxs, labels = svm_dataset.test.random_sampling.get_time_batch(
                            session,
                            voxnet,
                            cut_shape,
                            time_dim=time_dim,
                            batch_size=1)
                        feed_dict = {
                            FCNs[0]: voxs,
                            voxnet[0]: voxnet_data,
                            voxnet.keep_prob: 1.0,
                            FCNs.keep_prob: 1.0,
                            p['labels']: labels,
                            FCNs.training: False
                        }
                        feature, y_true = session.run(
                            [FCNs['gap'], p['y_true']], feed_dict=feed_dict)
                        feature = np.reshape(feature, [1, 128])
                        svm_feature[train_len + x] = feature
                        svm_label[train_len + x] = y_true
                    # print(svm_feature[0:train_len])
                    # print(svm_label[0:train_len])
                    clf = svm.SVC(C=1.0, kernel='rbf', gamma='auto')
                    clf.fit(svm_feature[0:train_len], svm_label[0:train_len])
                    predictions = clf.predict(svm_feature)
                    svm_train_evaluation = evaluation(
                        y_true=svm_label[:train_len],
                        y_predict=predictions[:train_len])
                    svm_test_evaluation = evaluation(
                        y_true=svm_label[train_len:],
                        y_predict=predictions[train_len:])
                    print('svm_train:\n' + str(svm_train_evaluation))
                    print('svm_test:\n' + str(svm_test_evaluation))
                    with open(accuracy_filename, 'a') as f:
                        f.write('svm_train:\n' + str(svm_train_evaluation) +
                                '\n')
                        f.write('svm_test:\n' + str(svm_test_evaluation) +
                                '\n')
                    #################################################

                # fr.write('test accuracy: {}'.format(test_accuracy))

                if batch_index % 128 == 0 or train_evaluation.ACC >= 0.85:
                    print('saving checkpoint {}...'.format(checkpoint_num))
                    filename = 'cx-{}.npz'.format(checkpoint_num)
                    filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
                    FCNs.npz_saver.save(session, filename)
                    print('checkpoint saved!')
                    checkpoint_num += 1
                    if train_evaluation.ACC >= 0.85:
                        break
            end = time.time()
            print('time:', (end - start) / 60)
    return test_evaluation
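Every example accumulates metrics through an evaluation object that supports +=, exposes ACC, prints its running scores, and in Code Example #7 is divided by a batch count. A minimal sketch consistent with that usage, assuming a binary confusion-matrix accumulator:

import numpy as np

class evaluation(object):
    # Hypothetical binary-classification accumulator matching the usage above.
    def __init__(self, y_true=None, y_predict=None):
        self.TP = self.TN = self.FP = self.FN = 0
        if y_true is not None and y_predict is not None:
            for t, p in zip(np.ravel(y_true), np.ravel(y_predict)):
                if t == 1 and p == 1:
                    self.TP += 1
                elif t == 0 and p == 0:
                    self.TN += 1
                elif t == 0 and p == 1:
                    self.FP += 1
                else:
                    self.FN += 1

    @property
    def ACC(self):
        total = self.TP + self.TN + self.FP + self.FN
        return (self.TP + self.TN) / total if total else 0.0

    def __iadd__(self, other):
        self.TP += other.TP
        self.TN += other.TN
        self.FP += other.FP
        self.FN += other.FN
        return self

    def __truediv__(self, n):
        # Code Example #7 averages an accumulated evaluation over batches;
        # scaling the counts leaves the ratio metrics unchanged.
        out = evaluation()
        out.TP, out.TN = self.TP / n, self.TN / n
        out.FP, out.FN = self.FP / n, self.FN / n
        return out

    def __str__(self):
        return 'ACC=%.4f (TP=%d TN=%d FP=%d FN=%d)' % (
            self.ACC, self.TP, self.TN, self.FP, self.FN)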
Code Example #5
def syn_train(*argv):

    input_weight = [2, 1, 1, 1]
    data_value = [[1.0], [1.0]]
    time_dim = 80  # number of time slices to sample
    batch_size = 8

    brain_map = [217, 218, 219]
    cut_shape = [100, 0, 100, 0, 100, 0]
    mask = nib.load(
        '/home/anzeng/rhb/fmri/fMRI-deeping-learning/BN_Atlas_246_3mm.nii')
    mask = mask.get_fdata()
    # compute the bounding box of the sMRI region to crop
    for x in brain_map:
        tmp = np.where(mask == x)
        for i in range(3):
            cut_shape[2 * i] = min(cut_shape[2 * i], np.min(tmp[i]))
            cut_shape[2 * i + 1] = max(cut_shape[2 * i + 1], np.max(tmp[i]))
    print(brain_map, cut_shape)

    dataset = fMRI_data(['AD', 'NC'],
                        dir='/home/anzeng/rhb/fmri_data',
                        batch_mode='random',
                        varbass=cfg.varbass)
    input_shape = [None, 32, 32, 32, 1]
    # for i in range(3):
    #     input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
    # input_shape.append(1)
    print(input_shape)
    # input_shape=[40,2,2,2,1]
    voxnet = VoxNet(input_shape=input_shape, voxnet_type='all_conv')
    FCN_input = tf.reshape(voxnet['gap'], (-1, time_dim, 128))
    print(FCN_input)
    FCNs = Classifier_FCN(FCN_input, nb_classes=2)
    # create the data placeholders
    p = dict()  # placeholders

    p['labels'] = tf.placeholder(tf.float32, [None, 2])
    p['data_value'] = tf.placeholder(tf.float32, [2, 1])
    p['input_pw'] = tf.placeholder(tf.float32, [None])  # prediction weights

    p['Weight'] = tf.matmul(p['labels'], p['data_value'])
    p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(
        logits=FCNs[-2], labels=p['labels'])
    p['Weight'] = tf.reshape(p['Weight'], [-1])
    p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
    p['loss'] = tf.reduce_mean(p['x_loss'])
    p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])

    # weighted ensembling at prediction time
    p['p_w'] = tf.reshape(p['input_pw'], [-1, 1])
    p['sum_w'] = tf.reduce_sum(p['p_w'])
    p['test_prediction'] = tf.cast(tf.argmax(FCNs[-1], 1), tf.float32)
    p['test_prediction'] = tf.reshape(p['test_prediction'], [-1, 4])
    p['test_prediction'] = tf.matmul(p['test_prediction'], p['p_w'])
    p['test_prediction'] = tf.round(tf.divide(p['test_prediction'],
                                              p['sum_w']))

    p['prediction'] = tf.argmax(FCNs[-1], 1)
    p['y_true'] = tf.argmax(p['labels'], 1)
    p['correct_prediction'] = tf.equal(p['prediction'], p['y_true'])
    p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'],
                                           tf.float32))

    p['learning_rate'] = tf.placeholder(tf.float32)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        p['train'] = tf.train.AdamOptimizer(p['learning_rate'],
                                            epsilon=1e-3).minimize(p['loss'])
    p['weights_decay'] = tf.train.GradientDescentOptimizer(
        p['learning_rate']).minimize(p['l2_loss'])

    # p['test_error'] = tf.placeholder(tf.float32)
    # hyperparameter settings

    num_batches = 2147483647

    initial_learning_rate = 0.01
    min_learning_rate = 0.0001
    learning_rate_decay_limit = 0.0001

    num_batches_per_epoch = len(dataset.train) / float(batch_size)
    learning_decay = 10 * num_batches_per_epoch
    weights_decay_after = 5 * num_batches_per_epoch

    checkpoint_num = cfg.checkpoint_start_num
    learning_step = 0
    min_loss = 1e308

    accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
    if not os.path.isdir(cfg.fcn_checkpoint_dir):
        os.mkdir(cfg.fcn_checkpoint_dir)

    if not os.path.exists(accuracy_filename):
        with open(accuracy_filename, 'a') as f:
            f.write('')
    with open(accuracy_filename, 'a') as f:
        f.write(str(brain_map) + '\n')
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        if cfg.istraining:
            voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
            FCNs.npz_saver.restore(session, cfg.fcn_checkpoint)
        # voxnet initialization
        for batch_index in range(num_batches):
            start = time.time()
            learning_rate = max(
                min_learning_rate,
                initial_learning_rate * 0.8**(learning_step / learning_decay))
            learning_step += 1

            if batch_index > weights_decay_after and batch_index % 256 == 0:
                session.run(p['weights_decay'], feed_dict=feed_dict)

            voxs, labels = dataset.train.oversampling.get_fmri_brain(
                cut_shape, batch_size, time_dim)
            # voxs = np.zeros(input_shape)
            # labels = np.zeros((8,2))
            feed_dict = {
                voxnet[0]: voxs,
                p['labels']: labels,
                p['learning_rate']: learning_rate,
                FCNs.training: True,
                voxnet.training: True,
                p['data_value']: data_value
            }
            # print(voxs.shape,labels.shape)
            session.run(p['train'], feed_dict=feed_dict)

            if batch_index and batch_index % 32 == 0:

                print("{} batch: {}".format(datetime.datetime.now(),
                                            batch_index))
                print('learning rate: {}'.format(learning_rate))
                # fr.write("{} batch: {}".format(datetime.datetime.now(), batch_index))
                # fr.write('learning rate: {}'.format(learning_rate))

                feed_dict[FCNs.training] = False
                loss = session.run(p['loss'], feed_dict=feed_dict)
                print('loss: {}'.format(loss))

                if (batch_index and loss > 1.5 * min_loss
                        and learning_rate > learning_rate_decay_limit):
                    min_loss = loss
                    learning_step *= 1.2
                    print("decreasing learning rate...")
                min_loss = min(loss, min_loss)

            if batch_index and batch_index % 16 == 0:
                num_accuracy_batches = 20
                train_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.train.random_sampling.get_fmri_brain(
                        cut_shape, 5, time_dim)
                    feed_dict = {
                        voxnet[0]: voxs,
                        p['labels']: labels,
                        FCNs.training: False,
                        voxnet.training: False,
                        p['input_pw']: input_weight
                    }
                    predictions, y_true = session.run(
                        [p['test_prediction'], p['y_true']],
                        feed_dict=feed_dict)
                    train_evaluation += evaluation(y_true=y_true,
                                                   y_predict=predictions)
                    print(train_evaluation)
                print('train accuracy \n' + str(train_evaluation))
                num_accuracy_batches = 27
                test_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.test.random_sampling.get_fmri_brain(
                        cut_shape, 1, time_dim)
                    feed_dict = {
                        voxnet[0]: voxs,
                        p['labels']: labels,
                        FCNs.training: False,
                        voxnet.training: False,
                        p['input_pw']: input_weight
                    }
                    predictions, y_true = session.run(
                        [p['test_prediction'], p['y_true']],
                        feed_dict=feed_dict)
                    test_evaluation += evaluation(y_true=y_true,
                                                  y_predict=predictions)
                    print(test_evaluation)
                print('test accuracy \n' + str(test_evaluation))
                # fr.write('test accuracy: {}'.format(test_accuracy))
                with open(accuracy_filename, 'a') as f:
                    f.write(str(checkpoint_num) + ':\n')
                    f.write(str(train_evaluation) + '\n')
                    f.write(str(test_evaluation) + '\n')
                if batch_index % 256 == 0:
                    print('saving checkpoint {}...'.format(checkpoint_num))
                    filename = 'voxnet-{}.npz'.format(checkpoint_num)
                    filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
                    voxnet.npz_saver.save(session, filename)
                    filename = 'fcn-{}.npz'.format(checkpoint_num)
                    filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
                    FCNs.npz_saver.save(session, filename)
                    print('checkpoint saved!')
                    checkpoint_num += 1
            end = time.time()
            print('time:', (end - start) / 60)
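Several examples weight the per-sample softmax cross-entropy by multiplying the one-hot labels with the data_value column vector. A small NumPy sketch of the same arithmetic, to make the matmul trick explicit:

import numpy as np

def weighted_softmax_xent(logits, one_hot_labels, data_value):
    # data_value is a (num_classes, 1) column of per-class weights; multiplying
    # the one-hot labels by it picks out each sample's class weight, exactly
    # what p['Weight'] = tf.matmul(p['labels'], p['data_value']) does above.
    logits = np.asarray(logits, dtype=float)
    labels = np.asarray(one_hot_labels, dtype=float)
    z = logits - logits.max(axis=1, keepdims=True)          # stable log-softmax
    log_softmax = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    xent = -(labels * log_softmax).sum(axis=1)
    weights = (labels @ np.asarray(data_value, dtype=float)).reshape(-1)
    return (weights * xent).mean()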
Code Example #6
def main(data_index=None,
         brain_map=[218],
         data_type=['MCIc', 'MCInc'],
         pre_dir='/home/anzeng/rhb/fmri_data',
         num_batches=512 * 5 + 1,
         test_size=6):
    tf.reset_default_graph()
    dataset = fMRI_data(data_index=data_index,
                        data_type=data_type,
                        varbass=False,
                        dir=pre_dir)

    # hyperparameter settings

    batch_size = 16

    initial_learning_rate = 0.0001
    min_learning_rate = 0.000001
    learning_rate_decay_limit = 0.0001

    num_batches_per_epoch = len(dataset.train) / float(batch_size)
    learning_decay = 10 * num_batches_per_epoch
    weights_decay_after = 5 * num_batches_per_epoch

    feature_index = brain_map
    data_value = [[1.0], [1.0]]
    checkpoint_num = 0
    learning_step = 0
    min_loss = 1e308
    time_dim = 80
    varbass = True

    # model construction
    mask = nib.load(
        '/home/anzeng/rhb/fmri/fMRI-deeping-learning/BN_Atlas_246_3mm.nii')
    mask = mask.get_fdata()
    f_len = 0
    for i in feature_index:
        f_len += len(np.where(mask == i)[0])
    FCNs = Classifier_FCN(tf.placeholder(tf.float32, [None, time_dim, 25]),
                          nb_classes=2)

    # create the data placeholders
    p = dict()  # placeholders

    p['labels'] = tf.placeholder(tf.float32, [None, 2])
    p['data_value'] = tf.placeholder(tf.float32, [2, 1])

    p['Weight'] = tf.matmul(p['labels'], p['data_value'])
    p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(
        logits=FCNs[-2], labels=p['labels'])
    p['Weight'] = tf.reshape(p['Weight'], [-1])
    p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
    p['loss'] = tf.reduce_mean(p['x_loss'])
    p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in FCNs.kernels])

    p['prediction'] = tf.argmax(FCNs[-1], 1)
    p['y_true'] = tf.argmax(p['labels'], 1)
    p['correct_prediction'] = tf.equal(tf.argmax(FCNs[-1], 1),
                                       tf.argmax(p['labels'], 1))
    p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'],
                                           tf.float32))

    p['learning_rate'] = tf.placeholder(tf.float32)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        p['train'] = tf.train.AdamOptimizer(0.00001,
                                            epsilon=1e-3).minimize(p['loss'])
    p['weights_decay'] = tf.train.GradientDescentOptimizer(0.0001).minimize(
        p['l2_loss'])

    accuracy_filename = os.path.join(cfg.fcn_checkpoint_dir, 'accuracies.txt')
    if not os.path.isdir(cfg.fcn_checkpoint_dir):
        os.mkdir(cfg.fcn_checkpoint_dir)

    with open(accuracy_filename, 'a') as f:
        f.write(str(feature_index))

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())

        # voxnet_data = np.ones([1,61,73,61,1],np.float32)
        for batch_index in range(num_batches):

            # learning_rate = max(min_learning_rate,
            #                     initial_learning_rate * 0.5 ** (learning_step / learning_decay))
            learning_step += 1

            if batch_index > weights_decay_after and batch_index % 256 == 0:
                session.run(p['weights_decay'], feed_dict=feed_dict)

            voxs, labels = dataset.train.oversampling.get_brain_batch(
                mask,
                batch_size=batch_size,
                time_dim=time_dim,
                feature_index=feature_index)
            feed_dict = {
                FCNs[0]: voxs,
                p['labels']: labels,
                FCNs.training: True,
                p['data_value']: data_value
            }

            Weight, cross_loss, x_loss, _ = session.run(
                [p['Weight'], p['cross_loss'], p['x_loss'], p['train']],
                feed_dict=feed_dict)
            # print("Weight\n",Weight)
            # print("cross_loss\n",cross_loss)
            # print("x_loss\n",x_loss)
            if batch_index and batch_index % 64 == 0:

                print("{} batch: {}".format(datetime.datetime.now(),
                                            batch_index))
                # print('learning rate: {}'.format(learning_rate))

                feed_dict[FCNs.training] = False
                loss = session.run(p['loss'], feed_dict=feed_dict)
                print('loss: {}'.format(loss))

                # if (batch_index and loss > 1.5 * min_loss and
                #         learning_rate > learning_rate_decay_limit):
                #     min_loss = loss
                #     learning_step *= 1.2
                #     print("decreasing learning rate...")
                # min_loss = min(loss, min_loss)

            if batch_index and batch_index % 64 == 0:
                num_accuracy_batches = 50
                train_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.train.random_sampling.get_brain_batch(
                        mask,
                        batch_size=batch_size,
                        time_dim=time_dim,
                        feature_index=feature_index)
                    feed_dict = {
                        FCNs[0]: voxs,
                        p['labels']: labels,
                        FCNs.training: False
                    }
                    start_time = time.time()
                    predictions, y_true = session.run(
                        [p['prediction'], p['y_true']], feed_dict=feed_dict)
                    train_evaluation += evaluation(y_true=y_true,
                                                   y_predict=predictions)
                    end_time = time.time()
                    print('total time: %f' % ((end_time - start_time) / 60))
                    print(train_evaluation)
                print('training accuracy \n' + str(train_evaluation))
                # num_accuracy_batches = 10
                print('loss: {}'.format(loss))
                num_accuracy_batches = test_size
                test_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.test.random_sampling.get_brain_batch(
                        mask,
                        batch_size=1,
                        time_dim=time_dim,
                        feature_index=feature_index)
                    feed_dict = {
                        FCNs[0]: voxs,
                        p['labels']: labels,
                        FCNs.training: False
                    }
                    y_true, prediction = session.run(
                        [p['y_true'], p['prediction']], feed_dict=feed_dict)
                    test_evaluation += evaluation(y_true=y_true,
                                                  y_predict=prediction)
                    # print(y_true, prediction)
                    print(test_evaluation)
                print('test accuracy \n' + str(test_evaluation))
                with open(accuracy_filename, 'a') as f:
                    f.write(str(checkpoint_num) + ':\n')
                    f.write(str(train_evaluation) + '\n')
                    f.write(str(test_evaluation) + '\n')
                # fr.write('test accuracy: {}'.format(test_accuracy))
                if batch_index % 1024 == 0:
                    print('saving checkpoint {}...'.format(checkpoint_num))
                    filename = 'cx-{}.npz'.format(checkpoint_num)
                    filename = os.path.join(cfg.fcn_checkpoint_dir, filename)
                    FCNs.npz_saver.save(session, filename)
                    print('checkpoint saved!')
                    checkpoint_num += 1
                if train_evaluation.ACC > 0.95 and batch_index % 1024 == 0:
                    break
    return test_evaluation
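get_brain_batch is external; judging from f_len above, it draws the voxels of the selected atlas regions from each time frame. A hedged sketch of that extraction (the function name, frame-sampling scheme, and any reduction to the FCN's 25 inputs are assumptions):

import numpy as np

def roi_time_series(fmri, mask, feature_index, time_dim):
    # fmri: (X, Y, Z, T) array; mask: atlas labels with the same spatial shape.
    # Returns a (time_dim, f_len) matrix, where f_len is the total voxel count
    # of the selected regions, as computed for f_len above.
    coords = np.vstack([np.argwhere(mask == i) for i in feature_index])
    frames = np.linspace(0, fmri.shape[-1] - 1, time_dim).astype(int)
    return np.stack([fmri[coords[:, 0], coords[:, 1], coords[:, 2], t]
                     for t in frames])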
Code Example #7
def main(*argv):

    dataset = fMRI_data(['MCIc', 'MCInc'],
                        dir='/home/anzeng/rhb/fmri_data/MRI_data/217',
                        batch_mode='random',
                        varbass=True)
    voxnet = VoxNet()

    # class weights
    data_value = [[1], [1]]
    # create the data placeholders
    p = dict()  # placeholders

    p['labels'] = tf.placeholder(tf.float32, [None, 2])
    p['data_value'] = tf.placeholder(tf.float32, [2, 1])

    p['Weight'] = tf.matmul(p['labels'], p['data_value'])
    p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(
        logits=voxnet[-2], labels=p['labels'])
    p['Weight'] = tf.reshape(p['Weight'], [-1])
    p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
    p['loss'] = tf.reduce_mean(p['x_loss'])
    p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in voxnet.kernels])

    p['prediction'] = tf.argmax(voxnet[-1], 1)
    p['y_true'] = tf.argmax(p['labels'], 1)
    p['correct_prediction'] = tf.equal(tf.argmax(voxnet[-1], 1),
                                       tf.argmax(p['labels'], 1))
    p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'],
                                           tf.float32))

    p['learning_rate'] = tf.placeholder(tf.float32)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        p['train'] = tf.train.AdamOptimizer(p['learning_rate'],
                                            epsilon=1e-3).minimize(p['loss'])
    p['weights_decay'] = tf.train.GradientDescentOptimizer(
        p['learning_rate']).minimize(p['l2_loss'])

    # Hyperparameters

    num_batches = 2147483647
    batch_size = 16

    initial_learning_rate = 0.01
    min_learning_rate = 0.00001
    learning_rate_decay_limit = 0.0001

    num_batches_per_epoch = len(dataset.train) / float(batch_size)
    learning_decay = 10 * num_batches_per_epoch
    weights_decay_after = 5 * num_batches_per_epoch

    checkpoint_num = 0
    learning_step = 0
    min_loss = 1e308

    accuracy_filename = os.path.join(cfg.checkpoint_dir, 'accuracies.txt')
    if not os.path.isdir(cfg.checkpoint_dir):
        os.mkdir(cfg.checkpoint_dir)

    with open(accuracy_filename, 'w') as f:
        f.write('')

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        if cfg.istraining:
            voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint_dir)
        for batch_index in range(num_batches):

            learning_rate = max(
                min_learning_rate,
                initial_learning_rate * 0.5**(learning_step / learning_decay))
            learning_step += 1

            if batch_index > weights_decay_after and batch_index % 256 == 0:
                session.run(p['weights_decay'], feed_dict=feed_dict)

            voxs, labels = dataset.train.get_batch(batch_size)
            feed_dict = {
                voxnet[0]: voxs,
                p['labels']: labels,
                p['learning_rate']: learning_rate,
                voxnet.training: True,
                p['data_value']: data_value
            }

            session.run(p['train'], feed_dict=feed_dict)

            if batch_index and batch_index % 512 == 0:

                print("{} batch: {}".format(datetime.datetime.now(),
                                            batch_index))
                print('learning rate: {}'.format(learning_rate))

                feed_dict[voxnet.training] = False
                loss = session.run(p['loss'], feed_dict=feed_dict)
                print('loss: {}'.format(loss))

                if (batch_index and loss > 1.5 * min_loss
                        and learning_rate > learning_rate_decay_limit):
                    min_loss = loss
                    learning_step *= 1.2
                    print("decreasing learning rate...")
                min_loss = min(loss, min_loss)

            if batch_index and batch_index % 128 == 0:
                num_accuracy_batches = 30
                total_accuracy = 0
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.train.get_batch(batch_size)
                    feed_dict = {
                        voxnet[0]: voxs,
                        p['labels']: labels,
                        voxnet.training: False
                    }
                    total_accuracy += session.run(p['accuracy'],
                                                  feed_dict=feed_dict)
                training_accuracy = total_accuracy / num_accuracy_batches
                print('training accuracy: {}'.format(training_accuracy))
                num_accuracy_batches = 90
                total_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.test.get_batch(batch_size)
                    feed_dict = {
                        voxnet[0]: voxs,
                        p['labels']: labels,
                        voxnet.training: False
                    }
                    predictions, y_true = session.run(
                        [p['prediction'], p['y_true']], feed_dict=feed_dict)
                    total_evaluation += evaluation(y_true=y_true,
                                                   y_predict=predictions)
                    # print(y_true, predictions)
                    print(total_evaluation)
                test_evaluation = total_evaluation / num_accuracy_batches
                print('test accuracy \n' + str(test_evaluation))
                with open(accuracy_filename, 'a') as f:
                    f.write(' '.join(
                        map(str, (checkpoint_num, training_accuracy,
                                  test_evaluation))) + '\n')
                if batch_index % 2048 == 0:
                    print('saving checkpoint {}...'.format(checkpoint_num))
                    filename = 'cx-{}.npz'.format(checkpoint_num)
                    filename = os.path.join(cfg.checkpoint_dir, filename)
                    voxnet.npz_saver.save(session, filename)
                    print('checkpoint saved!')
                    checkpoint_num += 1
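voxnet.npz_saver and FCNs.npz_saver persist weights as .npz archives. A minimal sketch of such a saver under TensorFlow 1.x, assuming positional storage of the trainable variables (the real class may key by name):

import numpy as np
import tensorflow as tf

class NpzSaver(object):
    # Hypothetical sketch: stores each variable positionally in one .npz file.
    def __init__(self, variables=None):
        self.variables = (variables if variables is not None
                          else tf.trainable_variables())

    def save(self, session, filename):
        np.savez(filename, *[session.run(v) for v in self.variables])

    def restore(self, session, filename):
        data = np.load(filename)
        for i, v in enumerate(self.variables):
            session.run(v.assign(data['arr_%d' % i]))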
Code Example #8
def main(cross_epoch=0,
         data_index=None,
         brain_map=None,
         cut_shape=None,
         data_type=['MCIc', 'MCInc'],
         pre_dir='/home/anzeng/rhb/fmri_data',
         num_batches=512 * 5 + 1,
         test_size=6):
    tf.reset_default_graph()

    if cut_shape is None:
        brain_map = [212, 213, 214, 215, 216, 217, 218]
        cut_shape = [100, 0, 100, 0, 100, 0]
        mask = nib.load(
            '/home/anzeng/rhb/fmri/fMRI-deeping-learning/BN_Atlas_246_3mm.nii')
        mask = mask.get_fdata()
        # compute the bounding box of the sMRI region to crop
        for x in brain_map:
            tmp = np.where(mask == x)
            for i in range(3):
                cut_shape[2 * i] = min(cut_shape[2 * i], np.min(tmp[i]))
                cut_shape[2 * i + 1] = max(cut_shape[2 * i + 1],
                                           np.max(tmp[i]))
        print(cut_shape)

    xyz = 32
    dataset = fMRI_data(data_type,
                        data_index=data_index,
                        dir=pre_dir,
                        batch_mode='random',
                        varbass=cfg.varbass)
    voxnet = VoxNet(input_shape=[None, xyz, xyz, xyz, 1], voxnet_type='cut')

    # class weights
    data_value = [[1], [1]]
    # create the data placeholders
    p = dict()  # placeholders

    p['labels'] = tf.placeholder(tf.float32, [None, 2])
    p['data_value'] = tf.placeholder(tf.float32, [2, 1])

    p['Weight'] = tf.matmul(p['labels'], p['data_value'])
    p['cross_loss'] = tf.nn.softmax_cross_entropy_with_logits(
        logits=voxnet[-2], labels=p['labels'])
    p['Weight'] = tf.reshape(p['Weight'], [-1])
    p['x_loss'] = tf.multiply(p['Weight'], p['cross_loss'])
    p['loss'] = tf.reduce_mean(p['x_loss'])
    p['l2_loss'] = tf.add_n([tf.nn.l2_loss(w) for w in voxnet.kernels])
    p['loss'] = p['loss'] + 0.0001 * p['l2_loss']

    p['prediction'] = tf.argmax(voxnet[-1], 1)
    p['y_true'] = tf.argmax(p['labels'], 1)
    p['correct_prediction'] = tf.equal(tf.argmax(voxnet[-1], 1),
                                       tf.argmax(p['labels'], 1))
    p['accuracy'] = tf.reduce_mean(tf.cast(p['correct_prediction'],
                                           tf.float32))

    p['learning_rate'] = tf.placeholder(tf.float32)
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        p['train'] = tf.train.AdamOptimizer(p['learning_rate'],
                                            epsilon=1e-3).minimize(p['loss'])
    p['weights_decay'] = tf.train.GradientDescentOptimizer(
        p['learning_rate']).minimize(p['l2_loss'])

    # Hyperparameters
    batch_size = 50

    initial_learning_rate = 0.0001
    min_learning_rate = 0.00001
    learning_rate_decay_limit = 0.0001

    num_batches_per_epoch = len(dataset.train) / batch_size
    learning_decay = 10 * num_batches_per_epoch
    weights_decay_after = 5 * num_batches_per_epoch

    checkpoint_num = 0
    learning_step = 0
    min_loss = 1e308

    wait = 0
    patience = 10
    accuracy_filename = os.path.join(cfg.voxnet_checkpoint_dir,
                                     'accuracies.txt')
    if not os.path.isdir(cfg.voxnet_checkpoint_dir):
        os.mkdir(cfg.voxnet_checkpoint_dir)

    with open(accuracy_filename, 'a') as f:
        f.write(str(brain_map) + '\n')

    filename = ""
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        if cfg.istraining:
            voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint)
        for batch_index in range(num_batches):

            # learning_rate = max(min_learning_rate,
            #                     initial_learning_rate * 0.8 ** (learning_step / learning_decay))
            learning_rate = 0.0001
            learning_step += 1

            # if batch_index > weights_decay_after and batch_index % 256 == 0:
            #     session.run(p['weights_decay'], feed_dict=feed_dict)

            voxs, labels = dataset.train.oversampling.get_smri_batch(
                cut_shape, batch_size)
            feed_dict = {
                voxnet[0]: voxs,
                p['labels']: labels,
                voxnet.keep_prob: 0.3,
                p['learning_rate']: learning_rate,
                voxnet.training: True,
                p['data_value']: data_value,
            }

            session.run(p['train'], feed_dict=feed_dict)

            if batch_index and batch_index % 512 == 0:

                print("{} batch: {}".format(datetime.datetime.now(),
                                            batch_index))
                print('learning rate: {}'.format(learning_rate))

                feed_dict[voxnet.training] = False
                loss = session.run(p['loss'], feed_dict=feed_dict)
                print('loss: {}'.format(loss))

                if (batch_index and loss > 1.5 * min_loss
                        and learning_rate > learning_rate_decay_limit):
                    min_loss = loss
                    learning_step *= 1.2
                    print("decreasing learning rate...")
                min_loss = min(loss, min_loss)

            if batch_index and batch_index % 128 == 0:
                num_accuracy_batches = 30
                # train_accuracy = 0
                training_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.train.random_sampling.get_smri_batch(
                        cut_shape, batch_size)
                    feed_dict = {
                        voxnet[0]: voxs,
                        p['labels']: labels,
                        voxnet.training: False,
                        voxnet.keep_prob: 1.0
                    }
                    start_time = time.time()
                    predictions, y_true = session.run(
                        [p['prediction'], p['y_true']], feed_dict=feed_dict)
                    training_evaluation += evaluation(y_true=y_true,
                                                      y_predict=predictions)
                    end_time = time.time()
                    print('total time: %f' % ((end_time - start_time) / 60))
                    print(training_evaluation)
                # training_accuracy = total_accuracy / num_accuracy_batgetches
                print('training accuracy \n' + str(training_evaluation))
                num_accuracy_batches = test_size
                test_evaluation = evaluation()
                for x in range(num_accuracy_batches):
                    voxs, labels = dataset.test.random_sampling.get_smri_batch(
                        cut_shape, batch_size)
                    feed_dict = {
                        voxnet[0]: voxs,
                        p['labels']: labels,
                        voxnet.training: False,
                        voxnet.keep_prob: 1.0
                    }
                    predictions, y_true = session.run(
                        [p['prediction'], p['y_true']], feed_dict=feed_dict)
                    test_evaluation += evaluation(y_true=y_true,
                                                  y_predict=predictions)
                    # print(y_true, predictions)
                    print(test_evaluation)
                print('test accuracy \n' + str(test_evaluation))
                with open(accuracy_filename, 'a') as f:
                    f.write(str(checkpoint_num) + ':\n')
                    f.write(str(training_evaluation) + '\n')
                    f.write(str(test_evaluation) + '\n')

                if training_evaluation.ACC >= 0.95:
                    wait += 1
                    if wait >= patience:
                        filename = 'cx-{}.npz'.format(cross_epoch)
                        filename = os.path.join(cfg.voxnet_checkpoint_dir,
                                                filename)
                        voxnet.npz_saver.save(session, filename)
                        print('checkpoint saved!')
                        return filename
                else:
                    wait = 0
                # if batch_index % 1024 == 0 or training_evaluation.ACC >= 0.9:
                #     print('saving checkpoint {}...'.format(checkpoint_num))
                #     filename = 'cx-{}.npz'.format(checkpoint_num)
                #     filename = os.path.join(cfg.voxnet_checkpoint_dir,filename)
                #     voxnet.npz_saver.save(session, filename)
                #     print('checkpoint saved!')
                #     checkpoint_num += 1
                #     if training_evaluation.ACC >= 0.9:
                #         break
        filename = 'cx-{}.npz'.format(cross_epoch)
        filename = os.path.join(cfg.voxnet_checkpoint_dir, filename)
        voxnet.npz_saver.save(session, filename)
        print('checkpoint saved!')
    return filename
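The bounding-box loop over atlas labels recurs verbatim in Examples #1, #5, #8, and #9; a small helper that factors it out (the function name is an assumption):

import numpy as np
import nibabel as nib

def compute_cut_shape(mask_path, brain_map):
    # Returns [xmin, xmax, ymin, ymax, zmin, zmax]: the tightest box holding
    # every voxel whose atlas label is in brain_map (same 100/0 seed values
    # as the inline loops, so the atlas is assumed smaller than 100^3).
    mask = nib.load(mask_path).get_fdata()
    cut_shape = [100, 0, 100, 0, 100, 0]
    for region in brain_map:
        idx = np.where(mask == region)
        for axis in range(3):
            cut_shape[2 * axis] = min(cut_shape[2 * axis], np.min(idx[axis]))
            cut_shape[2 * axis + 1] = max(cut_shape[2 * axis + 1],
                                          np.max(idx[axis]))
    return cut_shape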
Code Example #9
def main(*argv):

    data_value = [[1.0], [1.0]]
    time_dim = 80  # 挑选时间片个数
    batch_size = 8

    brain_map = [219]
    cut_shape = [100, 0, 100, 0, 100, 0]
    mask = nib.load(
        '/home/anzeng/rhb/fmri/fMRI-deeping-learning/BN_Atlas_246_3mm.nii')
    mask = mask.get_fdata()
    # compute the bounding box of the sMRI region to crop
    for x in brain_map:
        tmp = np.where(mask == x)
        for i in range(3):
            cut_shape[2 * i] = min(cut_shape[2 * i], np.min(tmp[i]))
            cut_shape[2 * i + 1] = max(cut_shape[2 * i + 1], np.max(tmp[i]))
    print(cut_shape)

    dataset = fMRI_data(['AD', 'NC'],
                        dir='/home/anzeng/rhb/fmri_data',
                        batch_mode='random',
                        varbass=cfg.varbass)
    input_shape = [time_dim * batch_size]
    for i in range(3):
        input_shape.append(cut_shape[2 * i + 1] + 1 - cut_shape[2 * i])
    input_shape.append(1)
    print(input_shape)
    # input_shape=[40,2,2,2,1]
    voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
    FCN_input = tf.reshape(voxnet['gap'], (-1, time_dim, 128))
    print(FCN_input)
    FCNs = Classifier_FCN(FCN_input, nb_classes=2)
    # 创建数据
    p = dict()  # placeholders

    p['labels'] = tf.placeholder(tf.float32, [None, 2])
    p['prediction'] = tf.argmax(FCNs[-1], 1)
    p['y_true'] = tf.argmax(p['labels'], 1)

    # p['test_error'] = tf.placeholder(tf.float32)
    # hyperparameter settings

    num_batches = 2147483647

    initial_learning_rate = 0.01
    min_learning_rate = 0.0001
    learning_rate_decay_limit = 0.0001

    num_batches_per_epoch = len(dataset.train) / float(batch_size)
    learning_decay = 10 * num_batches_per_epoch
    weights_decay_after = 5 * num_batches_per_epoch

    checkpoint_num = 0
    learning_step = 0
    min_loss = 1e308

    accuracy_filename = os.path.join(cfg.checkpoint_dir, 'accuracies.txt')

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint_dir)
        FCNs.npz_saver.restore(session, cfg.fcn_checkpoint_dir)
        # voxnet.npz_saver.restore(session, cfg.voxnet_checkpoint_dir)
        # voxnet initialization
        num_accuracy_batches = 90
        total_evaluation = evaluation()
        for x in range(num_accuracy_batches):
            voxs, labels = dataset.train.random_sampling.get_fmri_brain(
                cut_shape, batch_size, time_dim)
            feed_dict = {
                voxnet[0]: voxs,
                p['labels']: labels,
                FCNs.training: False,
                voxnet.training: False
            }
            predictions, y_true = session.run([p['prediction'], p['y_true']],
                                              feed_dict=feed_dict)
            total_evaluation += evaluation(y_true=y_true,
                                           y_predict=predictions)
            print(total_evaluation)
        print('train accuracy \n' + str(total_evaluation))
        num_accuracy_batches = 15
        total_evaluation = evaluation()
        for x in range(num_accuracy_batches):
            voxs, labels = dataset.test.random_sampling.get_fmri_brain(
                cut_shape, batch_size, time_dim)
            feed_dict = {
                voxnet[0]: voxs,
                p['labels']: labels,
                FCNs.training: False,
                voxnet.training: False
            }
            predictions, y_true = session.run([p['prediction'], p['y_true']],
                                              feed_dict=feed_dict)
            total_evaluation += evaluation(y_true=y_true,
                                           y_predict=predictions)
            print(total_evaluation)
        print('test accuracy \n' + str(total_evaluation))
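The reshape from voxnet['gap'] to FCN_input relies on each batch being laid out as time_dim consecutive frames per subject. A quick NumPy check of that layout with the values used above (time_dim=80, batch_size=8, 128-dim gap features):

import numpy as np

# The voxnet consumes 80 * 8 volumes and its 'gap' layer yields one 128-dim
# vector per volume; the reshape regroups them into per-subject sequences.
gap_features = np.zeros((80 * 8, 128))
sequences = gap_features.reshape(-1, 80, 128)
assert sequences.shape == (8, 80, 128)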
Code Example #10
def train_GRUs(cross_epoch=0,
               data_index=None,
               cut_shape=None,
               data_type=['MCIc', 'MCInc'],
               pre_dir='/home/anzeng/rhb/fmri_data',
               num_batches=256 * 5,
               voxnet_point=None,
               test_size=6,
               brain_map=[217],
               f_handle=None):
    # Clear the Keras backend session
    keras.backend.clear_session()
    # ##### Hyperparameters ##########
    time_dim = 80
    batch_size = 50
    g1 = tf.Graph()
    g2 = tf.Graph()
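    # NOTE: g1 and g2 are created here but not used below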
    # keras.optimizers.Adam()
    # #####################
    # # Load the pretrained model #
    xyz = 32
    # input_shape = [xyz, xyz, xyz, 1]
    # inputs = keras.layers.Input(input_shape)

    # _3D_CNN = keras.models.load_model(voxnet_point)
    auto = keras.models.load_model(voxnet_point)
    # _3D_CNN.predict(np.zeros([1,32,32,32,1]),1)
    # for i in _3D_CNN.layers:
    #     print(i.name)
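    # Sub-model that maps the autoencoder's input to its 'feature' layer
    # output; passed to the dataset loader below to embed each volume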
    layer_name = 'feature'
    feature_generator = keras.Model(inputs=auto.layers[0].input,
                                    outputs=auto.get_layer(layer_name).output)
    # feature_generator = keras.Model(inputs=_3D_CNN.layers[0].input, outputs=_3D_CNN.get_layer(layer_name).output)
    # sample = np.zeros([1, xyz, xyz, xyz, 1])
    # y = feature_generator.predict_on_batch(sample)
    # print(y)

    dataset = fMRI_data(data_type,
                        data_index=data_index,
                        varbass=False,
                        dir=pre_dir,
                        model=feature_generator,
                        cut_shape=cut_shape)
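    # GRU classifier over sequences of time_dim feature vectors of length 100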
    grus = GRUs(input_shape=[time_dim, 100], nb_class=2)
    # feature_generator = keras.Sequential()
    # feature_generator.get_layer()
    # voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
    # with tf.Session() as session:
    #     session.run(tf.global_variables_initializer())
    #     voxnet.npz_saver.restore(session, voxnet_point)
    logs_path = os.path.join(cfg.fcn_checkpoint_dir,
                             'train_' + str(cross_epoch))
    if os.path.isdir(logs_path):
        shutil.rmtree(logs_path)
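    # Train on oversampled batches; EarlyStoppingByACC halts training once
    # accuracy reaches 0.90 (patience 10)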
    grus.fit_generator(
        dataset.get_time_batch(cut_shape=cut_shape,
                               time_dim=time_dim,
                               batch_size=batch_size,
                               _batch_mode='oversampling',
                               _mode='train'),
        steps_per_epoch=8,
        epochs=num_batches,
        validation_data=dataset.get_time_batch(cut_shape=cut_shape,
                                               time_dim=time_dim,
                                               batch_size=1,
                                               _batch_mode='random',
                                               _mode='test',
                                               flag=1),
        validation_steps=test_size,
        callbacks=[
            toolbox.EarlyStoppingByACC('acc', 0.90, patience=10),
            keras.callbacks.TensorBoard(log_dir=logs_path)
        ])
    # Prediction
    # Build cyclic iterators over train/test (sample order not shuffled)
    train_iter = iter(
        dataset.get_time_batch(cut_shape=cut_shape,
                               time_dim=time_dim,
                               batch_size=1,
                               _batch_mode='random',
                               _mode='train',
                               flag=1))
    test_iter = iter(
        dataset.get_time_batch(cut_shape=cut_shape,
                               time_dim=time_dim,
                               batch_size=1,
                               _batch_mode='random',
                               _mode='test',
                               flag=1))

    # for i in range(num_batches / 16):
    #     fcn.fit_generator(dataset.train.oversampling.get_time_batch(session, voxnet, cut_shape, time_dim=time_dim,
    #                                                                 batch_size=batch_size), steps_per_epoch=8,
    #                       epochs=num_batches)
    test_evaluation = evaluation.evaluation()
    print('test evaluation:')
    for i in range(test_size):
        voxs, onehot = test_iter.__next__()
        prediction = grus.predict_on_batch(voxs)
        test_evaluation += evaluation.evaluation(
            y_true=np.argmax(onehot, axis=1),
            y_predict=np.argmax(prediction, axis=1))
        print(test_evaluation)

    train_len = 0
    for i in data_type:
        train_len += len(data_index[i]['train'])

    # train_evaluation = evaluation.evaluation()
    # for i in range(train_len):
    #     vosx, onehot = train_iter.__next__()
    #     prediction = grus.predict_on_batch(vosx)
    #     train_evaluation += evaluation.evaluation(y_true=np.argmax(onehot, axis=1),
    #                                              y_predict=np.argmax(prediction, axis=1))
    #     print(train_evaluation)

    # Use an SVM as the final classifier
    # print('svm evaluation:')
    # # Build the dataset
    # # BGRU feature-extraction model
    # BGRU_feature = keras.Model(inputs=grus.get_layer('BGRU_input').input,outputs=grus.get_layer('fc_1').output)
    # train_data = -1
    # train_label = -1
    # for i in range(train_len):
    #     vosx,onehot = train_iter.__next__()
    #     feature = BGRU_feature.predict_on_batch(vosx)
    #     y_true = np.argmax(onehot,axis=1)
    #     if isinstance(train_data,int):
    #         train_data = feature
    #         train_label = y_true
    #     else:
    #         train_data = np.vstack((train_data,feature))
    #         train_label = np.hstack((train_label,y_true))
    # clf = svm.SVC(C=1.0,kernel='rbf',gamma='auto')
    # clf.fit(train_data,train_label)
    # predict = clf.predict(train_data)
    # svm_train = evaluation.evaluation(y_true=train_label,y_predict=predict)
    # print('svm_train:')
    # print(svm_train)
    #
    # test_data = -1
    # test_label = -1
    # for i in range(test_size):
    #     vosx,onehot = test_iter.__next__()
    #     feature = BGRU_feature.predict_on_batch(vosx)
    #     y_true = np.argmax(onehot,axis=1)
    #     if isinstance(test_data,int):
    #         test_data = feature
    #         test_label = y_true
    #     else:
    #         test_data = np.vstack((test_data,feature))
    #         test_label = np.hstack((test_label,y_true))
    # predict = clf.predict(test_data)
    # svm_test = evaluation.evaluation(y_true=test_label, y_predict=predict)
    # print('svm_test:')
    # print(svm_test)

    if f_handle:
        # f_handle.write('svm_train:\n')
        # f_handle.write(str(svm_train)+'\n')
        # f_handle.write('svm_test:\n')
        # f_handle.write(str(svm_test)+'\n')
        # f_handle.write('train_evaluation\n')
        # f_handle.write(str(train_evaluation) + '\n')
        f_handle.write('test_evaluation\n')
        f_handle.write(str(test_evaluation) + '\n')

    if not os.path.exists(cfg.fcn_checkpoint_dir):
        os.makedirs(cfg.fcn_checkpoint_dir)
    filepath = os.path.join(cfg.fcn_checkpoint_dir,
                            'train_' + str(cross_epoch) + '.h5')
    grus.save(filepath=filepath)

    # Train evaluation is skipped above, so return an empty evaluation
    # as the second value
    return test_evaluation, evaluation.evaluation()
コード例 #11
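# Assumed module-level imports (not shown in this excerpt): os,
# numpy as np, tensorflow as tf, keras, and the project-local modules
# cfg, fMRI_data, VoxNet, FCNs, evaluation, and toolbox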
def train_fcn(cross_epoch=0,
              data_index=None,
              cut_shape=None,
              data_type=['MCIc', 'MCInc'],
              pre_dir='/home/anzeng/rhb/fmri_data',
              num_batches=256 * 5,
              voxnet_point=None,
              test_size=6,
              brain_map=[217]):
    tf.reset_default_graph()
    ##### Hyperparameters ##########
    time_dim = 20
    batch_size = 8
    #####################
    dataset = fMRI_data(data_type,
                        data_index=data_index,
                        varbass=False,
                        dir=pre_dir)
    fcn = FCNs()
    input_shape = [None, 32, 32, 32, 1]
    voxnet = VoxNet(input_shape=input_shape, voxnet_type='cut')
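    # Restore the pretrained VoxNet weights, then train the FCN on
    # time-series features generated through it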
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        voxnet.npz_saver.restore(session, voxnet_point)
        fcn.fit_generator(
            dataset.get_time_batch(session,
                                   voxnet,
                                   cut_shape,
                                   time_dim=time_dim,
                                   batch_size=batch_size,
                                   _batch_mode='oversampling',
                                   _mode='train'),
            steps_per_epoch=8,
            epochs=num_batches,
            validation_data=dataset.test.random_sampling.get_time_batch(
                session,
                voxnet,
                cut_shape,
                time_dim=time_dim,
                batch_size=1,
                _batch_mode='random',
                _mode='test'),
            validation_steps=test_size,
            callbacks=[toolbox.EarlyStoppingByACC('loss', 0.3, patience=10)])
        # Prediction
        data_iter = iter(
            dataset.test.random_sampling.get_time_batch(session,
                                                        voxnet,
                                                        cut_shape,
                                                        time_dim=time_dim,
                                                        batch_size=1))

        # for i in range(num_batches / 16):
        #     fcn.fit_generator(dataset.train.oversampling.get_time_batch(session, voxnet, cut_shape, time_dim=time_dim,
        #                                                                 batch_size=batch_size), steps_per_epoch=8,
        #                       epochs=num_batches)
        test_evaluation = evaluation.evaluation()
        for i in range(test_size):
            voxs, onehot = data_iter.__next__()
            prediction = fcn.predict_on_batch(voxs)
            test_evaluation += evaluation.evaluation(
                y_true=np.argmax(onehot, axis=1),
                y_predict=np.argmax(prediction, axis=1))
            print(test_evaluation)
        filepath = os.path.join(cfg.fcn_checkpoint_dir,
                                'train_' + str(cross_epoch) + '.h5')
        fcn.save(filepath)
    return test_evaluation