Example #1
import sys
from time import gmtime, strftime

# Parameters, dataset and RegenerateTrainingData are project-local modules;
# session is the tf.Session created at module level (see Example #4).
def Train(numberOfEpochPerDataset, numberOfDatasets, checkpointPath, saver,
          outputOp, trainingOp, lossOp, dataIterator, validationDatasetInitOp,
          isTrainingPlaceHolder, inputPlaceholder, labelPlaceholder, imageOp,
          labelOp, imageNameOp):
    old_validation_loss = sys.float_info.max
    training_log_file = open('trainingLog.csv', 'w')
    training_log_file.write('Epoch, Training Loss, Validation Loss\n')

    for dataset_index in range(numberOfDatasets):
        # Generate a new set of training data
        if Parameters.REGENERATE_TRAINING_DATA:
            RegenerateTrainingData.RegenerateTrainingData(
                "objectTransformDatasetTrain.tfrecords")

        # Set up reading from the tfrecords file
        training_dataset = dataset.GetDataset(
            Parameters.BATCH_SIZE, 1,
            "/home/charlesrwest/storage/Datasets/objectTransform/objectTransformDatasetTrain.tfrecords"
        )

        # create the initialisation operations
        train_init_op = dataIterator.make_initializer(training_dataset)

        session.run(train_init_op)

        for epoch in range(0, numberOfEpochPerDataset):
            # Training
            _, training_loss = TrainForNBatches(
                trainingOp, lossOp, imageOp, labelOp, train_init_op,
                inputPlaceholder, labelPlaceholder, session,
                Parameters.MAX_BATCHES_BEFORE_REPORTING)
            message = "Training Epoch {0} --- " + strftime(
                "%Y-%m-%d %H:%M:%S", gmtime()) + " --- Training Loss: {1}"
            print(message.format(epoch, training_loss))

            sys.stdout.flush()

            # Validation and reporting
            validation_loss = ReportValidationLoss(lossOp, imageOp, labelOp,
                                                   validationDatasetInitOp,
                                                   epoch, inputPlaceholder,
                                                   labelPlaceholder, session)
            SaveOutputsAsJson("results/results" + str(epoch) + ".json",
                              outputOp, lossOp, imageOp, labelOp, imageNameOp,
                              validationDatasetInitOp, inputPlaceholder,
                              labelPlaceholder, session)
            message = "{0}, {1}, {2}\n"
            training_log_file.write(
                message.format(epoch, training_loss, validation_loss))
            training_log_file.flush()

            # Checkpoint the model if the validation loss improved
            if validation_loss < old_validation_loss:
                old_validation_loss = validation_loss
                saver.save(session, './object_transform-model')
                print("Validation loss improved; checkpoint saved")

    training_log_file.close()
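
The snippet above calls a TrainForNBatches helper that is not shown on this page. A minimal sketch of what such a helper might look like, assuming it pulls batches from the iterator's output ops and feeds them through the placeholders; the body below is a guess for illustration, not the original implementation:

def TrainForNBatches(trainingOp, lossOp, imageOp, labelOp, initOp,
                     inputPlaceholder, labelPlaceholder, session, maxBatches):
    # Run up to maxBatches optimisation steps and return the mean loss.
    losses = []
    for _ in range(maxBatches):
        try:
            images, labels = session.run([imageOp, labelOp])
        except tf.errors.OutOfRangeError:
            # Dataset exhausted: re-initialise the iterator and continue.
            session.run(initOp)
            continue
        _, loss = session.run([trainingOp, lossOp],
                              feed_dict={inputPlaceholder: images,
                                         labelPlaceholder: labels})
        losses.append(loss)
    return None, sum(losses) / max(len(losses), 1)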
Example #2
    def sense(self):
        # Encode the current data row as bits: every bit but the last is the
        # situation; the final bit is the ground-truth label to predict.
        bitstring = BitString(''.join(map(str, self.dataset[self.current_index])))
        situation = bitstring[:-1]
        self.groundtruth = bitstring[-1]
        return situation

    def execute(self, action):
        self.current_index += 1
        if action == self.groundtruth:
            self.correct += 1
        return action == self.groundtruth  # reward: True when the prediction is correct
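
These sense/execute methods match the scenario interface of the Python xcs package, which also provides the BitString type used above. A minimal sketch of driving such a scenario with XCS, assuming the enclosing class (called TradingScenario here, a hypothetical name) implements the rest of the interface (reset, more, get_possible_actions):

from xcs import XCSAlgorithm
from xcs.scenarios import ScenarioObserver

scenario = ScenarioObserver(TradingScenario(input_data))  # hypothetical constructor
algorithm = XCSAlgorithm()
model = algorithm.new_model(scenario)
model.run(scenario, learn=True)  # repeatedly calls sense() and execute()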

if __name__ == '__main__':
    # Get dataset
    my_data = dataset.GetDataset('dataset/ETH_BTC/btc6_15.csv')
    my_data2 = dataset.GetDataset('dataset/ETH_BTC/eth6_15.csv')
    #input_data = dataset.TransformToBinary(my_data , enable_indicator=True , pred_days=1) #for each data, [situation, action]
    #input_data = dataset.TransformToBinary2(my_data , 10 , 1)
    input_data = dataset.TransformToBinary3(my_data, enable_indicator=True, pred_days=1, comp_days=2)
    # input_data2 = dataset.TransformToBinary3(my_data2, enable_indicator=False, pred_days=1 , comp_days=2)

    input_size = input_data.shape[0]
    # Reversed slices, assuming rows are ordered oldest to newest:
    # the newer half becomes traindata, the older half becomes testdata.
    traindata = input_data[-1:-int(input_size*0.5):-1]
    testdata = input_data[-int(input_size*0.5)::-1]
    # testdata2 = input_data2[::-1]

    sum_up = 0
    test_number = testdata.shape[0]
    for i in range(test_number):
        if testdata[i][-1] == 1:
            sum_up += 1  # count test rows whose label bit is 1
Example #3
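# Excerpt from a TF1 convolutional-network script: weight_variable,
# bias_variable, h_fc1_drop, x, y_ and keep_prob are defined earlier
# in the same file (not shown here).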
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
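# Note: taking log of a softmax output can underflow for confident
# predictions; tf.nn.softmax_cross_entropy_with_logits is the numerically
# stable TF1 alternative.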
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

ti, tl, vi, vl = dataset.GetDataset(0.1, True, True)

batch_size = 32
epoch = 10
print('accuracy:')
print(sess.run(accuracy, feed_dict={x: vi, y_: vl, keep_prob: 1.0}))
for e in range(epoch):
    for i in range(len(ti) // batch_size):
        batch_start = i * batch_size
        batch_end = batch_start + batch_size
        batch_images = ti[batch_start:batch_end]
        batch_labels = tl[batch_start:batch_end]
        sess.run(train_step,
                 feed_dict={
                     x: batch_images,
                     y_: batch_labels,
                     keep_prob: 0.5  # assumed training dropout rate
                 })
Example #4
session = tf.Session()

validation_dataset = dataset.GetDataset(
    1, 1,
    "/home/charlesrwest/storage/Datasets/objectTransform/objectTransformDatasetValidate.tfrecords"
)

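# from_structure builds a reinitializable iterator: the get_next() ops below
# can be re-pointed at any dataset with matching types and shapes via
# make_initializer (the validation set here, the training sets inside Train).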
iterator = tf.data.Iterator.from_structure(validation_dataset.output_types,
                                           validation_dataset.output_shapes)
images, image_names, labels = iterator.get_next()

validation_init_op = iterator.make_initializer(validation_dataset)

# Make the network
is_training_placeholder, input_placeholder, label_place_holder, output, loss = ConstructNetwork(
    Parameters.IMAGE_SIZE, num_channels, Parameters.NUMBER_OF_NETWORK_OUTPUTS)

session.run(tf.global_variables_initializer())

# Add the optimizer