Example #1
def loadData(self):
    # Read the data file once; the with-statement closes the handle automatically.
    with open(self.fileName, "r") as file:
        for strLine in file:
            strLine = strLine.strip('\n')
            feats = strLine.split(' ')
            # The last field on each line is the class label.
            label = feats[-1]
            # Keep all but the last two fields as the feature vector.
            feats = feats[:-2]
            dataObj = DataClass.Data(feats, label)
            self.data.append(dataObj)
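
A minimal sketch of what this parser does to one space-separated line, using a hypothetical stand-in for DataClass.Data that simply stores its two arguments (the real class is not shown in this example):

class Data:
    # Hypothetical stand-in for DataClass.Data, used only to illustrate the parsing.
    def __init__(self, feats, label):
        self.feats = feats
        self.label = label

line = "0.12 3.40 7.80 1\n".strip('\n')
fields = line.split(' ')   # ['0.12', '3.40', '7.80', '1']
label = fields[-1]         # '1'
feats = fields[:-2]        # ['0.12', '3.40'] -- the slice drops the last two fields
obj = Data(feats, label)
print(obj.feats, obj.label)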
Example #2
def train():

    print('\n\n', 'training', '\n\n')
    sess = tf.Session()

    dequeueSize = 100

    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.1
    decay_step = 25
    decay_rate = 0.96
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step * dequeueSize,
                                               decay_step * dequeueSize,
                                               decay_rate,
                                               staircase=True)
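    # With staircase=True this gives lr = 0.1 * 0.96 ** floor(global_step / 25),
    # i.e. the learning rate drops by 4% every 25 optimizer steps.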
    tf.summary.scalar('learning_rate', learning_rate)

    ######################
    # DATASET PARAMETERS #
    ######################

    if os.path.exists(
            '/media/asgharpn/daten2017-03/Bone_Machine_learning/Learning_dataset/projected_augmented_not_squared_yz_test_01_without_2mice_10'
    ):
        print('\nusing full dataset\n')
        dataBaseDir = '/media/asgharpn/daten2017-03/Bone_Machine_learning/Learning_dataset/projected_augmented_not_squared_yz_test_01_without_2mice_10/'
    else:
        raise NameError('Dataset cannot be found')

    trainHdf5 = dataBaseDir + 'bone_projected_train_set.hdf5'
    validHdf5 = dataBaseDir + 'bone_projected_valid_set.hdf5'

    cropSize = 733
    batchSize = 50
    stretchLow = 0.1  # stretch channels lower percentile
    stretchHigh = 99.9  # stretch channels upper percentile

    imSize_x = 733
    imSize_z = 161
    numClasses = 4
    numChan = 1
    data = dataClass.Data(trainHdf5, ['data', 'Index'], batchSize)
    dataTest = dataClass.Data(validHdf5, ['data', 'Index'],
                              batchSize * 2)  # larger batch size at test time

    ### define model
    is_training = tf.placeholder(tf.bool, [],
                                 name='is_training')  # toggles batch norm train/eval mode
    input = tf.placeholder('float32',
                           shape=[None, imSize_x, imSize_z, numChan],
                           name='input')  # input image batch
    print(input.get_shape())
    labels = tf.placeholder('float32', shape=[None, numClasses],
                            name='labels')  # class labels

    logits = BAAM(input, is_training)
    predicted_y = tf.nn.softmax(logits, name='softmax')

    acc = accuracy(predicted_y, labels)
    cross_entropy = loss_logits(logits, labels)
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(
        cross_entropy, global_step=global_step)
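    # Adam minimizes the cross-entropy computed on the logits; passing global_step
    # makes each optimizer step advance the exponential-decay schedule defined above.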

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)

    # Merge all the summaries and write them to the train/ and test/ subdirectories of checkpoint_dir
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(checkpoint_dir + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(checkpoint_dir + '/test', sess.graph)
    sess.run(tf.global_variables_initializer(), {is_training: True})

    # training loop

    for i in range(MAX_STEPS):

        if i % 50 == 0:  # every 50 steps: evaluate a validation batch and record execution stats

            batch = dataTest.getBatch()
            processedBatch = procIm.preProcessImages(batch['data'],
                                                     imSize_x,
                                                     imSize_z,
                                                     cropSize,
                                                     numChan,
                                                     rescale=False,
                                                     stretch=False,
                                                     means=None,
                                                     stds=None,
                                                     stretchLow=stretchLow,
                                                     stretchHigh=stretchHigh,
                                                     jitter=False,
                                                     randTransform=False)

            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            summary, cur_test_acc, cur_test_loss = sess.run(
                [merged, acc, cross_entropy],
                feed_dict={
                    is_training: False,
                    input: processedBatch,
                    labels: batch['Index']
                },
                options=run_options,
                run_metadata=run_metadata)

            train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
            test_writer.add_summary(summary, i)
            print('Adding run metadata for', i)
            print('Valid accuracy at step %s: %s, loss: %s' %
                  (i, cur_test_acc, cur_test_loss))

        # Training step: fetch and preprocess a training batch, then run the
        # optimizer with batch norm in training mode.
        batch = data.getBatch()
        processedBatch = procIm.preProcessImages(batch['data'],
                                                 imSize_x,
                                                 imSize_z,
                                                 cropSize,
                                                 numChan,
                                                 rescale=False,
                                                 stretch=False,
                                                 means=None,
                                                 stds=None,
                                                 stretchLow=stretchLow,
                                                 stretchHigh=stretchHigh,
                                                 jitter=False,
                                                 randTransform=False)

        summary, _, cur_train_acc, cur_train_loss = sess.run(
            [merged, train_step, acc, cross_entropy],
            feed_dict={
                is_training: True,
                input: processedBatch,
                labels: batch['Index']
            })
        train_writer.add_summary(summary, i)
        print('Train accuracy at step %s: %s, loss: %s' %
              (i, cur_train_acc, cur_train_loss))

        if i % SAVE_INTERVAL == 0:
            checkpoint_path = os.path.join(checkpoint_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=i)

    train_writer.close()
    test_writer.close()
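
train() relies on several module-level names that are not defined inside the function (checkpoint_dir, MAX_STEPS, SAVE_INTERVAL, the BAAM, accuracy and loss_logits helpers, and the dataClass/procIm modules). A minimal driver sketch, assuming those helpers are importable and using hypothetical values for the constants:

import os
import tensorflow as tf  # TensorFlow 1.x API (tf.Session, tf.placeholder)

# Hypothetical module-level configuration assumed by train(); adjust as needed.
checkpoint_dir = './checkpoints'
MAX_STEPS = 5000
SAVE_INTERVAL = 500

if __name__ == '__main__':
    os.makedirs(checkpoint_dir, exist_ok=True)
    train()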