Example #1
    def __init__(self, model_name):
        self.model_name = model_name
        self.action_names = ['A', 'D', 'M', 'L', 'R']
        self.num_actions = len(self.action_names)
        self.memory = deque()

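        # Online network trains on the replay memory; the target network is a
        # lagged copy used for stable TD targets (standard DQN practice).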
        self.model = Cnn(self.model_name, self.memory)
        self.target_model = Cnn(self.model_name, [], target=True)

        # self.state = np.zeros([1, VISION_F + VISION_B + 1, VISION_W * 2 + 1, 1])
        self.previous_states = np.zeros(
            [1, VISION_F + VISION_B + 1, VISION_W * 2 + 1, 4])
        self.previous_actions = np.zeros([4])
        self.previous_actions.fill(2)
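        # index 2 ('M') serves as the default action seeding the history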
        self.q_values = np.zeros(5)
        self.action = 2

        self.count_states = self.model.get_count_states()

        self.delay_count = 0

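        # anneal the exploration probability linearly from start to end value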
        self.epsilon_linear = LinearControlSignal(
            start_value=EPSILON_GREEDY_START_PROB,
            end_value=EPSILON_GREEDY_END_PROB,
            repeat=False)

        self.advantage = 0
        self.value = 0

        self.score = 0
Example #2
def findWellStruct():
    allSeqs = helpers.readAllSeqs()
    shutil.rmtree("bestpredictions", ignore_errors=True)  # tolerate a missing directory on first run
    os.mkdir("bestpredictions")

    names = os.listdir("model")
    testname = ""
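    # keep the last entry under "model/" whose name starts with "test"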
    for name in names:
        if name[0:4] == "test":
            testname = name

    best, parameters = hyperparams.getHyperParams("model/" + testname)

    nn_user = nnUser(parameters)

    tf.reset_default_graph()

    with tf.variable_scope("model" + str(int(testname[7:10])), reuse=None):
        model = Cnn(parameters)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "model/my_model_final.ckpt")

        nn_user.predict(sess, model, allSeqs, "best")
Example #3
    def __init__(self,
                 learning=True,
                 epsilon=1,
                 alpha=0.1,
                 discount=1,
                 tolerance=0.01):

        self.Q = dict()
        self.Q1 = dict()
        self.Q2 = dict()

        self.s = states.States()
        self.s.buildAllStates()

        self.cnn = Cnn()

        self.learning = learning
        self.epsilon = epsilon
        self.alpha = alpha
        self.discount = discount
        self.tolerance = tolerance
        self.t = 2

        self.convolutionLayersNumber = 0
        self.poolingLayersNumber = 0
        self.fullyConnectedLayersNumber = 0

        self.convolutionLayersLimit = 3
        self.poolingLayersLimit = 2
        self.fullyConnectedLayersLimit = 2
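        # per-type limits bound the architecture search space the agent explores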

        self.actionsInitialState()
        self.actionsConvolutionState()
        self.actionsPoolingState()
        self.actionsFullyConnectedState()
Example #4
def Cnn_test():
    # initializing the network
    network = Cnn(BATCH_SIZE)
    network.getTheParas(MODEL_FILE)

    # load the test data
    _, _, test_imgs, _, _, test_label = util.load_data(MNIST_PATH, False)

    log_string('------------start test-----------')

    num_batch = test_imgs.shape[0] // BATCH_SIZE
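    # integer division: a trailing partial batch of test images, if any, is skipped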
    start = 0
    end = start + BATCH_SIZE
    loss = 0.0
    total_correct = 0.0
    total_seen = 0
    for n in range(num_batch):
        log_string('testing {}/{} (batches) completed!'.format(n + 1, num_batch))
        current_img = test_imgs[start:end, ...]
        current_label = test_label[start:end, ...]
        start = end
        end += BATCH_SIZE
        predict_val, loss_val = network.forward(current_img, current_label)
        correct = np.sum(predict_val == current_label)
        total_correct += correct
        loss += loss_val
        total_seen += BATCH_SIZE
    log_string('eval mean loss: {}'.format(loss / num_batch))
    log_string('eval accuracy: {}'.format(total_correct / total_seen))
Example #5
def createBest(parameters):
    allSeqInfo, allTurnCombs = parseData.getSeqInfo()

    doneSeqs = []

    testSeqs = []
    nn_user = nnUser(parameters)

    #Number of sequences with known structure and less than 3 alanines
    validSet = helpers.validSet(allSeqInfo)

    #Print to command line the current parameter information
    helpers.printParamInfo(parameters, 0)

    with tf.variable_scope("model0"):
        model = Cnn(parameters)

    #Unlike the cross-validated variant below, train a single production
    #network on the full data set, saving the best model found during training.
    sess = tf.Session()
    nn_user.genData([], "test")
    times, costs, metTests, metTrains, metTestBest = \
        nn_user.trainNet(sess, model, 0, production=True, saveBest=True,
                         outputCost=parameters.outputCost)
    #Output the best metric average and corresponding hyperparameters to a file
    #in the paramResults directory
    helpers.writeParamInfo("model/", "testNum", parameters, metTestBest, 0)
    sess.close()
Example #6
    def __init__(self, num_sift_features, num_classes, sift):
        super(Classifier, self).__init__()

        # define the cnn model:
        self.cnn = Cnn()
        self.sift = sift

        classifier_model = []
        classifier_model += [nn.BatchNorm1d(num_features=192 * 5 * 5 + 1152)]
        classifier_model += [nn.Linear(192 * 5 * 5 + 1152, 2976)]
        classifier_model += [nn.Dropout(0.2)]
        classifier_model += [nn.ReLU()]
        classifier_model += [nn.Linear(2976, 1000)]
        classifier_model += [nn.Dropout(0.2)]
        classifier_model += [nn.ReLU()]
        classifier_model += [nn.Linear(1000, 10)]
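        # note: the head hardcodes 10 outputs; the num_classes argument is not used in this snippet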
        # classifier_model += [nn.Dropout(0.2)]
        # classifier_model += [nn.ReLU()]
        # classifier_model += [nn.Linear(864, 432)]
        # classifier_model += [nn.Dropout(0.2)]
        # classifier_model += [nn.ReLU()]
        # classifier_model += [nn.Linear(432, 10)]

        # classifier_model += [nn.Linear(192 * 6 * 6 + 1152, 192 * 6 * 6 + 1152)]
        # classifier_model += [nn.ReLU()]
        # classifier_model += [nn.Dropout(0.2)]
        # classifier_model += [nn.Linear(192 * 6 * 6 + 1152, 1000)]
        # classifier_model += [nn.ReLU()]
        # classifier_model += [nn.Dropout(0.2)]
        # classifier_model += [nn.Linear(1000, 10)]
        #classifier_model += [nn.Softmax(dim=1)]

        self.classifier = nn.Sequential(*classifier_model)
Example #7
def test(metadata_file, dataset_dir, model_file):
    generator = AudioGenerator(metadata_path=metadata_file,
                               dataset_path=dataset_dir,
                               batch_size=1)
    vggvox_net = Cnn(model_file)
    metrics = vggvox_net.evaluate_generator(
        test_generator=generator.test(), test_steps=generator.get_test_steps())
    print(
        f"Loss {metrics[0]}, Top 1 Accuracy {metrics[1]}, Top 5 Accuracy {metrics[2]}"
    )
Example #8
def train(metadata_file, dataset_dir, checkpoint_file):
    generator = AudioGenerator(metadata_path=metadata_file,
                               dataset_path=dataset_dir,
                               batch_size=20)
    vggvox_net = Cnn()
    vggvox_net.fit_generator(model_checkpoint_path=checkpoint_file,
                             train_generator=generator.train(),
                             train_steps=generator.get_training_steps(),
                             validation_generator=generator.validate(),
                             validation_steps=generator.get_validation_steps())
Example #9
def Cnn_train():
    # initializing the network
    network = Cnn(BATCH_SIZE)
    # load the data
    train_imgs, val_imgs, _, train_label, val_label, _ = util.load_data(MNIST_PATH)

    for epoch in range(MAX_EPOCH):
        # eval_one_epoch(network, val_imgs, val_label)
        log_string('------------start train epoch {}/{}----------'.format(epoch, MAX_EPOCH))
        train_one_epoch(network, train_imgs, train_label, epoch)
Example #10
    def reset(self, testing=False):

        if testing:
            self.epsilon = 0
            self.alpha = 0
        else:
            self.convolutionLayersNumber = 0
            self.poolingLayersNumber = 0
            self.fullyConnectedLayersNumber = 0
            self.epsilon = 0.9999 * self.t
            del self.cnn
            self.cnn = Cnn()
Example #11
    def test(self):
        print('---------------Start Testing-----------')
        convNet = ()
        self.cnn = Cnn()
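        # greedily follow the learned Q-values from the initial state until
        # TERMINATE, collecting the chosen layer states into convNet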
        changeState = self.initial
        convNet += (changeState, )
        while changeState.name != states.TERMINATE:

            changeState = self.getMaxQState(self.Q, changeState)
            convNet += (changeState, )

        print('Testing\nConvnet', convNet)
        score = self.cnn.buildModel(convNet)
        print('TestScore', score)
Example #12
    def __init__(self):

        conf_path_cnn = '/src/cnn/conf/cnn.yaml'
        with open(conf_path_cnn, 'r') as fd:
            self.conf = yaml.safe_load(fd)

        self.batch_size = self.conf['testing']['batch_size']
        self.checkpoint_path = self.conf['misc']['checkpoint_path']

        self.logger = Logger
        self.preProcessor = PreProcessor(Logger)
        self.cnn = Cnn(Logger, conf_path_cnn, testing=True)
        self.sess = tf.compat.v1.Session()

        self.start()
Example #13
def main():
    epochs = 100
    #x, y = load_data(DATA_PATH, verbose=False, num_samples=5)
    x = np.load('/global/scratch/alex_vlissidis/X.npz')['x']
    y = np.load('/global/scratch/alex_vlissidis/y.npz')['y']

    print("training data shapes:", x.shape, y.shape)

    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)

    print("building model...")
    print(x.shape)
    print(y.shape[1])
    model = Cnn(x.shape[1:], y.shape[1])
    model.build()

    history = model.train(x_train, y_train, epochs=epochs)

    print("Saving model...")
    model.model.save('models/model.h5')

    print("Plotting...")
    f, (ax1, ax2) = plt.subplots(2, 1)
    ax1.plot(range(1, epochs + 1),
             history.history['val_acc'],
             'tab:blue',
             label="validation accuracy")
    ax1.plot(range(1, epochs + 1),
             history.history['acc'],
             'tab:red',
             label="training accuracy")

    ax2.plot(range(1, epochs + 1),
             history.history['loss'],
             'tab:orange',
             label="loss")
    ax2.plot(range(1, epochs + 1),
             history.history['val_loss'],
             'tab:green',
             label="validation loss")

    ax1.legend()
    ax2.legend()

    f.savefig('figures/training.png', dpi=300)
    print("Done.")
Example #14
    def run(self, x, nbit, resolution, error):

        nbits = str(nbit)
        test_data = np.load("./data/" + nbits + "bit" + "/" + nbits + "bit" +
                            "_" + str(resolution) + "x" + str(resolution) +
                            "_test_images.npy")
        train_data = np.load("./data/" + nbits + "bit" + "/" + nbits + "bit" +
                             "_" + str(resolution) + "x" + str(resolution) +
                             "_train_images.npy")

        x = int(x)
        if (error != 0):
            test_data = np.array(
                [self.pixel_bit_error(error, i, nbit) for i in test_data])

        if (x == 1):
            generations = input("enter the number of generations")
            batchSize = input("enter the size of each batch")
            generations = int(generations)
            batchSize = int(batchSize)
            Jesus = Cnn()
            Jesus.run(train_data, test_data, resolution, error, generations,
                      batchSize)
        if (x == 2):
            if (error != 0):
                train_data = np.array(
                    [self.pixel_bit_error(error, i, nbit) for i in train_data])
            Jesus = Svm()
            Jesus.run(train_data, test_data, resolution)
        if (x == 3):
            if (error != 0):
                train_data = np.array(
                    [self.pixel_bit_error(error, i, nbit) for i in train_data])
            k = input("k ?")
            k = int(k)
            Jesus = Knn(k)
            Jesus.run(train_data, test_data, resolution)
        if (x == 4):
            Jesus = Caliente([], error)
            batchSize = input("enter the size of each batch")
            generations = input("enter the number of generations")
            generations = int(generations)
            batchSize = int(batchSize)
            Jesus.run(train_data, test_data, resolution, generations,
                      batchSize)
Example #15
def findSingleSeq(testSeq):
    names = os.listdir("model")
    testname = ""
    for name in names:
        if name[0:4] == "test":
            testname = name

    best, parameters = hyperparams.getHyperParams("model/" + testname)

    nn_user = nnUser(parameters)

    tf.reset_default_graph()

    with tf.variable_scope("model" + str(int(testname[7:10])), reuse=None):
        model = Cnn(parameters)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "model/my_model_final.ckpt")

        nn_user.predict(sess, model, [testSeq], "individ")
Example #16
def main(args):
    batch_size = 128
    ds_train, ds_test, info = load_data(batch_size)

    image_shape = info.features["image"].shape
    image_size = tf.reduce_prod(image_shape)
    num_train_samples = info.splits["train"].num_examples
    num_test_samples = info.splits["test"].num_examples

    cnn = Cnn()
    if "init_model" in args.cnn_mode:
        cnn.create_model(batch_size=batch_size,
                         image_shape=image_shape,
                         feature_outputs=100)
    if "train" in args.cnn_mode:
        cnn.train(ds_train,
                  ds_test,
                  epochs=10,
                  steps_per_epoch=int(num_train_samples / batch_size))
    if "save" in args.cnn_mode:
        cnn.save()
    if "load" in args.cnn_mode:
        cnn.load_combined_model()
    if "test" in args.cnn_mode:
        cnn.test(ds_test)

    gp = DeepKernelGP(ds_train,
                      num_train_samples,
                      image_size,
                      10,
                      cnn.feature_extractor,
                      num_inducing_points=100)
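    # the GP reuses the CNN's trained feature extractor as a deep kernel;
    # inducing points keep the GP sparse and tractable on the full training set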
    if "train" in args.gp_mode:
        gp.train(10000)
    if "test" in args.gp_mode:
        gp.test(ds_test, image_size, batch_size, num_test_samples)
Example #17
import tensorflow as tf
from cnn import Cnn
import config
import util

x_train_orig, y_train_orig, x_test_orig, y_test_orig, classes = util.load_data_set()
x_train = util.pre_treat(x_train_orig)
x_test = util.pre_treat(x_test_orig)
y_train = util.pre_treat(y_train_orig, is_x=False, class_num=len(classes))
y_test = util.pre_treat(y_test_orig, is_x=False, class_num=len(classes))

cnn = Cnn(config.conv_layers, config.fc_layers, config.filters,
          config.learning_rate, config.beta1, config.beta2)

(m, n_H0, n_W0, n_C0) = x_train.shape
n_y = y_train.shape[1]

# construct the computation graph
cnn.initialize(n_H0, n_W0, n_C0, n_y)
cnn.forward()
cost = cnn.cost()
optimizer = cnn.get_optimizer(cost)
predict, accuracy = cnn.predict()

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)

    for i in range(1, config.num_epochs + 1):
        # the loop body is truncated in the source; a minimal training step,
        # assuming the feed-dict interface used by the similar example further down:
        sess.run(optimizer, feed_dict={cnn.x: x_train, cnn.y: y_train})
Example #18
    def setUp(self) -> None:
        self.subject = Cnn()
Example #19
import signal

import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist

from cnn import Cnn
from plain.layers.flatten import Flatten
from plain.layers.maxPooling import MaxPooling
from plain.layers.softmax import Softmax
from vectorized.layers.conv2d import Conv2d
from vectorized.layers.fully_connected import FullyConnected

cnn = Cnn(300, 0.07)


def keyboard_interrupt_handler(signal, frame):
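    # checkpoint the model on Ctrl-C so an interrupted training run can be resumed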
    cnn.save()
    exit(0)


def main():
    plt.ion()
    # cnn = Cnn.load("saved/cnn_12-28-2019_19-36")
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

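    # one-hot encode the labels into shape (num_samples, 10, 1)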
    labels = np.zeros((y_train.shape[0], 10, 1))
    for index, x in enumerate(y_train):
        labels[index, x] = [1]

    # the snippet is truncated here; assuming MNIST images reshaped to (N, 1, 28, 28)
    images = X_train.reshape(X_train.shape[0], 1, 28, 28)
Example #20
# %%

train_x = np.expand_dims(train_x, axis=1)
print(train_x[0])
plt.imshow(train_x[0][0])
per = np.random.permutation(train_x.shape[0])  # shuffled row indices
rtrain_x = torch.from_numpy(train_x[per])
rtrain_y = torch.from_numpy(train_y[per]).squeeze()
print(per)
plt.figure()
plt.imshow(rtrain_x[0][0])

# %%

cnn = Cnn()
cnn.to(device)
print(cnn)
dummy_input = torch.randn(1, 1, 48, 48, device=device)
torch.onnx.export(cnn,
                  dummy_input,
                  "convnet.onnx",
                  verbose=True,
                  input_names=['input'],
                  output_names=['output'])

# %%

learn_rate = 3e-4
los = nn.CrossEntropyLoss()
loader = Loader(rtrain_x, 64, label=rtrain_y)
Example #21
def train():
    TIMESTAMP = "{0:%Y-%m-%d-%H-%M/}".format(datetime.now())
    log.log_info('program start')
    data, num_good, num_bad = util.load_train_data(num_data // 2)
    log.log_debug('Data loading completed')

    # resample
    data, length = util.resample(data, 600)
    data = util.reshape(data, length)
    good_data_origin = data[:num_good, :]
    bad_data_origin = data[num_good:, :]

    # extract bad data for test and train
    permutation = list(np.random.permutation(len(bad_data_origin)))
    shuffled_bad_data = bad_data_origin[permutation, :]
    test_bad_data = shuffled_bad_data[:int(num_bad * 0.3), :]
    train_bad_data_origin = shuffled_bad_data[int(num_bad * 0.3):, :]
    # extract corresponding good data for test and train
    permutation = list(np.random.permutation(len(good_data_origin)))
    shuffled_good_data = good_data_origin[permutation, :]
    test_good_data = shuffled_good_data[:len(test_bad_data), :]
    train_good_data = shuffled_good_data[len(test_bad_data):, :]

    assert len(test_bad_data) == len(test_good_data)
    # construct test data
    test_y = np.array([1.] * len(test_good_data) + [0.] * len(test_bad_data), dtype=float).reshape(
        (len(test_bad_data) + len(test_good_data), 1))
    test_x = np.vstack((test_good_data, test_bad_data))

    # expand the number of bad data for train
    train_x = np.vstack((train_good_data, train_bad_data_origin))
    train_y = np.array([1.] * len(train_good_data) + [0.] * len(train_bad_data_origin), dtype=float).reshape(
        (len(train_bad_data_origin) + len(train_good_data), 1))

    train_x, train_y, num_expand = util.expand(train_x, train_y)

    # regularize
    for i in range(len(train_x)):
        train_x[i, :, 0] = util.regularize(train_x[i, :, 0])
        train_x[i, :, 1] = util.regularize(train_x[i, :, 1])
        train_x[i, :, 2] = util.regularize(train_x[i, :, 2])
    for i in range(len(test_x)):
        test_x[i, :, 0] = util.regularize(test_x[i, :, 0])
        test_x[i, :, 1] = util.regularize(test_x[i, :, 1])
        test_x[i, :, 2] = util.regularize(test_x[i, :, 2])

    # random
    train_x, train_y = util.shuffle_data(train_x, train_y)

    log.log_debug('prepare completed')
    log.log_info('convolution layers: ' + str(conv_layers))
    log.log_info('filters: ' + str(filters))
    log.log_info('full connected layers: ' + str(fc_layers))
    log.log_info('learning rate: %f' % learning_rate)
    log.log_info('keep prob: ' + str(keep_prob))
    log.log_info('the number of expanding bad data: ' + str(num_expand))
    log.log_info('mini batch size: ' + str(mini_batch_size))

    if mini_batch_size != 0:
        assert mini_batch_size <= len(train_x)

    cnn = Cnn(conv_layers, fc_layers, filters, learning_rate)
    (m, n_W0, n_C0) = train_x.shape
    n_y = train_y.shape[1]

    # construct the computation graph
    cnn.initialize(n_W0, n_C0, n_y)
    cost = cnn.cost()
    optimizer = cnn.get_optimizer(cost)
    predict, accuracy = cnn.predict()

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:

        # log for tensorboard
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter("resource/tsb/train/" + TIMESTAMP, sess.graph)
        test_writer = tf.summary.FileWriter("resource/tsb/test/" + TIMESTAMP)

        if enable_debug:
            sess = tf_debug.LocalCLIDebugWrapperSession(sess)

        sess.run(init)

        for i in range(1, num_epochs + 1):
            if mini_batch_size != 0:
                num_mini_batches = int(m / mini_batch_size)
                mini_batches = util.random_mini_batches(train_x, train_y, mini_batch_size)

                cost_value = 0
                for mini_batch in mini_batches:
                    (mini_batch_x, mini_batch_y) = mini_batch
                    _, temp_cost = sess.run([optimizer, cost], feed_dict={cnn.x: mini_batch_x, cnn.y: mini_batch_y,
                                                                          cnn.keep_prob: keep_prob})
                    cost_value += temp_cost
                cost_value /= num_mini_batches
            else:
                _, cost_value = sess.run([optimizer, cost],
                                         feed_dict={cnn.x: train_x, cnn.y: train_y, cnn.keep_prob: keep_prob})

            # disable dropout
            summary_train, train_accuracy = sess.run([merged, accuracy],
                                                     feed_dict={cnn.x: train_x, cnn.y: train_y,
                                                                cnn.keep_prob: 1})
            summary_test, test_accuracy = sess.run([merged, accuracy],
                                                   feed_dict={cnn.x: test_x, cnn.y: test_y, cnn.keep_prob: 1})

            train_writer.add_summary(summary_train, i - 1)
            test_writer.add_summary(summary_test, i - 1)

            if print_detail and (i % 10 == 0 or i == 1):
                info = '\nIteration %d\n' % i + \
                       'Cost: %f\n' % cost_value + \
                       'Train accuracy: %f\n' % train_accuracy + \
                       'Test accuracy: %f' % test_accuracy
                log.log_info(info)

            # stop when test>0.95 and train>0.99
            if test_accuracy >= 0.95 and train_accuracy >= 0.99:
                info = '\nIteration %d\n' % i + \
                       'Cost: %f\n' % cost_value + \
                       'Train accuracy: %f\n' % train_accuracy + \
                       'Test accuracy: %f' % test_accuracy
                log.log_info(info)
                saver.save(sess, "resource/model/" + TIMESTAMP)
                break
            saver.save(sess, "resource/model/" + TIMESTAMP)
        train_writer.close()
        test_writer.close()

    log.log_info('program end')
Example #22
import numpy as np
import torch
from tqdm import tqdm
from cnn import Cnn
from config import Config
from hyperparameters import Hyperparameters
import matplotlib.pyplot as plt

if __name__ == "__main__":
    cfg = Config()
    data = np.load(cfg.trainingDataName, allow_pickle=True)
    metaData = np.load(cfg.trainingMetadataName, allow_pickle=True)
    numSubjects = len(metaData[0])
    print("Dataset Loaded")

    hyp = Hyperparameters()
    cnn = Cnn(cfg, hyp, numSubjects)
    print("CNN Generated")

    images = ((torch.from_numpy(np.stack(data[:, 0]))).view(
        -1, cfg.imagChannels, cfg.res[0], cfg.res[1]))
    oneHotVecs = torch.Tensor(list(data[:, 1]))
    print("Images Converted to Tensors")

    trainSize = int((1 - cfg.testPercent) * len(data))
    testSize = len(data) - trainSize

    trainImgs = images[:trainSize]
    trainOneHotVecs = oneHotVecs[:trainSize]

    testImgs = images[trainSize:]
    testOneHotVecs = oneHotVecs[trainSize:]
Example #23
def createTrainTestPred(parameters, testNum, save):
    allSeqInfo, allTurnCombs = parseData.getSeqInfo()

    doneSeqs = []

    count = 0
    testSeqs = []
    nn_user = nnUser(parameters)

    #Number of sequences with known structure and less than 3 alanines
    numLessThree = len(helpers.validSet(allSeqInfo))

    #Print to command line the current parameter information
    helpers.printParamInfo(parameters, testNum)

    with tf.variable_scope("model" + str(testNum)):
        model = Cnn(parameters)

    bestMetAvg = 0.0
    batchNum = 0

    #For each sequence, place it into the current batch. Then, when there are a
    #"batch number" of sequences collected, train a convo network with them as
    #the validation set. Then, measure the accuracy of the current network.
    sess = tf.Session()
    for seq in allSeqInfo:
        if helpers.numAla(seq) < 3 and seq not in doneSeqs:
            if parameters.verbose:
                print(seq)
            testSeqs.append(seq)
            count += 1

            doneSeqs.append(seq)
            currSeq = seq
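            # also mark every cyclic rotation of the sequence as done, so
            # rotation-equivalent sequences are not tested twice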
            for i in range(5):
                currSeq = currSeq[5] + currSeq[0:5]
                doneSeqs.append(currSeq)

            if count % parameters.batchSize == 0 or count == numLessThree:
                batchNum += 1
                nn_user.genData(testSeqs, "test")
                times, costs, metTests, metTrains, metTestBest =\
                    nn_user.trainNet(sess,model,testNum,production=False,\
                                    saveBest=save,outputCost=parameters.outputCost)
                bestMetAvg += metTestBest * len(testSeqs) / len(
                    helpers.validSet(allSeqInfo))
                if parameters.outputCost:
                    # output files of the training and testing accuracy over the training process
                    helpers.outputCostFile(times, costs, testNum, batchNum)
                    helpers.outputMetTest(times[0:len(metTests)], metTests,
                                          parameters.metric, testNum, batchNum)
                    helpers.outputMetTrain(times[0:len(metTrains)], metTrains,
                                           parameters.metric, testNum,
                                           batchNum)

                trainSeqs = helpers.createTrainSeqs(allSeqInfo, testSeqs)
                nn_user.predict(sess, model, testSeqs, "test")
                nn_user.predict(sess, model, trainSeqs, "train")

                first = False
                testSeqs = []
                if not parameters.crossValid:
                    break

    #Output the best metric average and corresponding hyperparameters to a file
    #in the paramResults directory
    helpers.writeParamInfo("paramResults/", "testNum", parameters, bestMetAvg,
                           testNum)
    sess.close()
Example #24
    def __init__(self):
        # Keep a reference to the "close" line in the data[0] dataseries
        self.dataclose = self.datas[0].close
        self.dataopen = self.datas[0].open
        self.datahigh = self.datas[0].high
        self.datalow = self.datas[0].low
        self.datavolume = self.datas[0].volume

        #initialize cnns
        self.cnns = []
        for i in range(1):
            filename = ('/home/gene/git/autoTrading/backtrader/model/' +
                        self.getdatanames()[0] + str(i) + '.h5')
            self.cnns.append(Cnn(filename))
        # Add a MovingAverageSimple indicator
        self.rsi60 = bt.indicators.RSI_Safe(self.datas[0], period=100)
        self.history = []
        self.holding_days = []

        self.indicator_count = 15
        self.indicators = []
        for i in range(self.indicator_count):
            self.indicators.append([])
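        # one sub-list per indicator family; each family gets one instance per period 6-20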

        for i in range(6, 21):
            j = 0
            self.indicators[j].append(
                bt.indicators.RSI_Safe(self.datas[0], period=i))  # momentum
            j += 1
            self.indicators[j].append(
                bt.indicators.WilliamsR(self.datas[0], period=i))  # momentum
            j += 1
            self.indicators[j].append(
                bt.talib.MFI(self.datahigh,
                             self.datalow,
                             self.dataclose,
                             self.datavolume,
                             period=i))  # momentum
            j += 1
            self.indicators[j].append(
                bt.indicators.RateOfChange(self.datas[0],
                                           period=i))  # momentum
            j += 1
            self.indicators[j].append(bt.talib.CMO(self.dataclose,
                                                   period=i))  # momentum
            j += 1
            self.indicators[j].append(bt.talib.SMA(self.dataclose, period=i))
            j += 1
            self.indicators[j].append(bt.talib.SMA(self.dataopen, period=i))
            j += 1
            self.indicators[j].append(
                bt.indicators.ExponentialMovingAverage(self.datas[0],
                                                       period=i))
            j += 1
            self.indicators[j].append(
                bt.indicators.WeightedMovingAverage(self.datas[0], period=i))
            j += 1
            self.indicators[j].append(
                bt.indicators.HullMovingAverage(self.datas[0], period=i))
            j += 1
            self.indicators[j].append(
                bt.indicators.Trix(self.datas[0], period=i))  # trend
            j += 1
            self.indicators[j].append(
                bt.indicators.CommodityChannelIndex(self.datas[0],
                                                    period=i))  # trend
            j += 1
            self.indicators[j].append(
                bt.indicators.DetrendedPriceOscillator(self.datas[0],
                                                       period=i))  # trend
            j += 1
            self.indicators[j].append(
                bt.indicators.DirectionalMovementIndex(self.datas[0],
                                                       period=i))  # trend
            j += 1
            self.indicators[j].append(
                bt.indicators.BollingerBands(self.datas[0],
                                             period=i))  # volatility
            j += 1