Example #1
class Logger:
    def __init__(self,
                 logdir,
                 type='tensorboardX',
                 summary=True,
                 step=None,
                 iterations=0,
                 epoch=0,
                 save_frequency=50,
                 tag='log_'):
        self.logger = None
        self.type = type
        self.step = step
        self.checkpoint = CheckpointManager(logdir)
        self.iterations = iterations
        self.epoch = epoch
        self.save_frequency = save_frequency
        self.tag = tag

        self.summary = summary
        if summary:
            if type == 'tensorboardX':
                self.logger = tensorboardX.SummaryWriter(logdir)
            else:
                raise NotImplementedError
        else:
            self.type = 'None'
Example #2
def init():
    checkpoint_manager = CheckpointManager(NETWORK_NUMBER)

    init_g = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    # Keep the session open instead of letting a `with` block close it,
    # so the restored model can actually be used by the caller.
    sess = tf.Session()
    sess.run(init_g)
    sess.run(init_l)
    checkpoint_manager.restore_model(sess)
    return sess
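A short, illustrative driver for init(); the session comes back open, so the caller is responsible for closing it (the inference step is only a placeholder):

sess = init()
try:
    pass  # run inference or evaluation against the restored model here
finally:
    sess.close()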
Example #3
def setup():
    conf = Configuration('..')
    prices_manager = Prices('prices', conf['se'])
    today = date.today()
    range_dates = ('2017-01-01', today.isoformat())

    ckp_manager = CheckpointManager('checkpoint')
    data = ckp_manager.load_base()

    if data is not None:
        print('last checkpoint loaded')
        symbols = data['symbols']
        all_data = data['all_data']
    else:
        blacklist = set(['CAPU', 'PESA', 'PSUR', 'POLL'])
        symbols = conf.symbols()
        all_data = load_all_data(prices_manager, blacklist, symbols,
                                 range_dates)

        print('calculating returns')
        for symbol_data in all_data.values():
            # TODO there must be a better way to do this
            returns = pd.DataFrame(symbol_data['Adj Close']).apply(
                lambda x: np.log(x) - np.log(x.shift()))
            symbol_data['Daily Return'] = returns

        state = {
            'all_data': all_data,
            'symbols': symbols,
        }

        ckp_manager.save_base(state)

    print("using dates [%s - %s]" % range_dates)

    # strategy = StrategyFindBestMA(ckp_manager)
    strategy = StrategyRandom01(ckp_manager)
    print(f'using strategy {type(strategy).__name__}')
    strategy.load(all_data.keys(), all_data)

    return (today, range_dates, conf, prices_manager, all_data, ckp_manager,
            strategy)
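A hedged sketch of how setup() might be driven from a script entry point; the loop body is only a placeholder, since the strategy's actual API beyond load() is not shown in this example:

if __name__ == '__main__':
    (today, range_dates, conf, prices_manager,
     all_data, ckp_manager, strategy) = setup()

    # Placeholder: inspect the per-symbol data that setup() prepared.
    for symbol, symbol_data in all_data.items():
        print(symbol, symbol_data['Daily Return'].tail(1))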
Example #4
def test_nn(number,
            input_placeholder,
            output_placeholder,
            accuracy,
            cost,
            limit=None):
    checkpoint_manager = CheckpointManager(number)

    init_g = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_g)
        sess.run(init_l)
        checkpoint_manager.restore_model(sess)

        counter = 0
        total_accuracy = 0
        total_cost = 0
        for test_images, test_labels in ds.test_batch_generator(BATCH_SIZE):

            feed = {
                input_placeholder: test_images,
                output_placeholder: test_labels
            }

            test_accuracy, test_cost = sess.run([accuracy, cost],
                                                feed_dict=feed)
            print("Batch {:3}, Accuracy: {:3.1%}, Cost: {}" \
                  .format(counter, test_accuracy, test_cost))

            total_accuracy += test_accuracy
            total_cost += test_cost
            counter += 1

        overall_accuracy = total_accuracy / counter
        overall_cost = total_cost / counter

        print("Total test accuracy: {:5.1%}".format(overall_accuracy))

        return overall_accuracy, overall_cost
Example #5
def test_nn(number, input_placeholder, output_placeholder, accuracy, cost):
    checkpoint_manager = CheckpointManager(number)

    init_g = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_g)
        sess.run(init_l)
        checkpoint_manager.restore_model(sess)

        total_accuracy = 0
        total_cost = 0
        batches = None
        for batch_index, test_images, test_labels in ds.test_batch_generator(100, grayscale=True):

            feed = {
                input_placeholder: test_images,
                output_placeholder: test_labels
            }

            test_accuracy, test_cost = sess.run(
                [accuracy, cost], feed_dict=feed)
            print("Batch {:3}, Accuracy: {:3.1%}, Cost: {}" \
                  .format(batch_index, test_accuracy, test_cost))

            total_accuracy += test_accuracy
            total_cost += test_cost
            batches = batch_index

        overall_accuracy = total_accuracy / (batches + 1)
        overall_cost = total_cost / (batches + 1)

        print("Total test accuracy: {:5.1%}".format(overall_accuracy))

        return overall_accuracy, overall_cost
Example #6
def train_nn(number, input_placeholder, output_placeholder, accuracy, cost,
             optimizer):
    checkpoint_manager = CheckpointManager(number)

    init_g = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_g)
        sess.run(init_l)

        checkpoint_manager.on_training_start(ds.DATASET_FOLDER, EPOCH_LENGTH,
                                             BATCH_SIZE, LEARNING_RATE,
                                             "AdamOptimizer", True)

        for batch_index, batch_images, batch_labels in ds.training_batch_generator(
                BATCH_SIZE):

            print("Starting batch {:3}".format(batch_index + 1))

            for current_epoch in range(EPOCH_LENGTH):

                feed = {
                    input_placeholder: batch_images,
                    output_placeholder: batch_labels
                }

                epoch_accuracy, epoch_cost, _ = sess.run(
                    [accuracy, cost, optimizer], feed_dict=feed)
                print("Batch {:3}, Epoch {:3} -> Accuracy: {:3.1%}, Cost: {}".
                      format(batch_index + 1, current_epoch + 1,
                             epoch_accuracy, epoch_cost))

                checkpoint_manager.on_epoch_completed()

            batch_accuracy_training, batch_cost_training = sess.run(
                [accuracy, cost], feed_dict=feed)

            print("Batch {} has been finished. Accuracy: {:3.1%}, Cost: {}".
                  format(batch_index + 1, batch_accuracy_training,
                         batch_cost_training))

            checkpoint_manager.on_batch_completed(batch_cost_training,
                                                  batch_accuracy_training)

            checkpoint_manager.save_model(sess)

        print("\nTraining finished!")

        overall_accuracy, overall_cost = \
            test_nn(number, input_placeholder, output_placeholder, accuracy, cost, limit=None)

        checkpoint_manager.on_training_completed(overall_accuracy)
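These training and test routines lean on a project-local CheckpointManager (and a ds dataset module) whose code is not shown; note that its constructor takes a network number here but a directory path in the setup() and Logger examples, so the project likely has more than one variant. Purely to make the assumed interface explicit, here is a minimal do-nothing stub matching the calls above; every body is a placeholder rather than the original implementation, and the name of the last on_training_start argument is a guess:

class CheckpointManagerStub:
    """Do-nothing stand-in exposing the methods the examples above call."""

    def __init__(self, network_number):
        self.network_number = network_number

    def restore_model(self, sess):
        pass  # would load the latest saved checkpoint into `sess`

    def save_model(self, sess):
        pass  # would write a checkpoint for the variables in `sess`

    def on_training_start(self, dataset_folder, epoch_length, batch_size,
                          learning_rate, optimizer_name, extra_flag):
        pass  # would record the hyperparameters of this run

    def on_epoch_completed(self):
        pass  # would bump an internal epoch counter

    def on_batch_completed(self, batch_cost, batch_accuracy):
        pass  # would log per-batch training metrics

    def on_training_completed(self, overall_accuracy):
        pass  # would record the final test accuracy

    def save_checkpoint(self, epoch, iterations, models, tag):
        pass  # used by the Logger example below

    def load_base(self):
        return None  # used by the setup() example above

    def save_base(self, state):
        pass  # used by the setup() example above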
Example #7
class Logger:
    def __init__(self,
                 logdir,
                 type='tensorboardX',
                 summary=True,
                 step=None,
                 iterations=0,
                 epoch=0,
                 save_frequency=50,
                 tag='log_'):
        self.logger = None
        self.type = type
        self.step = step
        self.checkpoint = CheckpointManager(logdir)
        self.iterations = iterations
        self.epoch = epoch
        self.save_frequency = save_frequency
        self.tag = tag

        self.summary = summary
        if summary:
            if type == 'tensorboardX':
                self.logger = tensorboardX.SummaryWriter(logdir)
            else:
                raise NotImplementedError
        else:
            self.type = 'None'

    def step_it(self):
        self.iterations += 1

    def save_model(self, models):
        self.checkpoint.save_checkpoint(self.epoch, self.iterations, models,
                                        self.tag)

    def step_epoch(self, models):
        self.epoch += 1
        if self.epoch % self.save_frequency == 0:
            self.checkpoint.save_checkpoint(self.epoch, self.iterations,
                                            models, self.tag)

    def close(self):
        if self.logger is not None:
            self.logger.close()
        self.info("Closing the Logger.")

    def add_scalar(self, tag, scalar_value, step=None):
        if self.type == 'tensorboardX':
            tag = self._transform_tag(tag)
            self.logger.add_scalar(tag, scalar_value, step)

    def add_image(self, tag, image, step=None):
        if self.type == 'tensorboardX':
            tag = self._transform_tag(tag)
            self.logger.add_image(tag, image, step)

    def add_figure(self, tag, image, step=None):
        if self.type == 'tensorboardX':
            tag = self._transform_tag(tag)
            self.logger.add_figure(tag, image, step)

    def add_table(self, tag, tbl, step=None):
        if self.type == 'tensorboardX':
            tag = self._transform_tag(tag)
            tbl_str = "<table width=\"100%\"> "
            tbl_str += "<tr> \
                     <th>Term</th> \
                     <th>Value</th> \
                     </tr>"

            for k, v in tbl.items():
                tbl_str += "<tr> \
                           <td>%s</td> \
                           <td>%s</td> \
                           </tr>" % (k, v)

            tbl_str += "</table>"
            self.logger.add_text(tag, tbl_str, step)

    def _transform_tag(self, tag):
        tag = f"{tag}/{self.step}" if self.step is not None else tag
        return tag

    def add_results(self, results):
        if self.type == 'tensorboardX':
            tag = self._transform_tag("Results")
            text = "<table width=\"100%\">"
            for k, res in results.items():
                text += "<tr><td>{k}</td>" + " ".join(
                    [str('<td>{x}</td>') for x in res.values()]) + "</tr>"
            text += "</table>"
            self.logger.add_text(tag, text)
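A brief, illustrative usage sketch for the Logger above, assuming tensorboardX is installed; `models` is simply passed through to CheckpointManager.save_checkpoint, so None is only a placeholder here:

logger = Logger('runs/experiment_01', type='tensorboardX', save_frequency=10)

for epoch in range(100):
    loss = 0.0  # placeholder for a real training step
    logger.add_scalar('train/loss', loss, step=epoch)
    logger.step_it()
    logger.step_epoch(models=None)

logger.add_table('hyperparameters', {'lr': 1e-3, 'batch_size': 32})
logger.close()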
Example #8
def main():
    tf.reset_default_graph()

    NETWORK_NUMBER = 4

    input_placeholder = tf.placeholder(
        tf.float32, shape=[None, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNEL], name='input_placeholder')

    output_placeholder = tf.placeholder(
        tf.float32, shape=[None, 1], name='output_placeholder')

    layer_conv_1, weights_conv_1 = new_conv_layer(
        input=input_placeholder,
        num_input_channels=IMAGE_CHANNEL,
        filter_size=5,
        num_filters=64,
        pooling=2
    )

    layer_conv_2, weights_conv_2 = new_conv_layer(
        input=layer_conv_1,
        num_input_channels=64,
        filter_size=3,
        num_filters=128,
        pooling=2
    )

    layer_conv_3, weights_conv_3 = new_conv_layer(
        input=layer_conv_2,
        num_input_channels=128,
        filter_size=3,
        num_filters=128,
        pooling=None
    )

    layer_conv_4, weights_conv_4 = new_conv_layer(
        input=layer_conv_3,
        num_input_channels=128,
        filter_size=3,
        num_filters=128,
        pooling=None
    )

    layer_conv_5, weights_conv_5 = new_conv_layer(
        input=layer_conv_4,
        num_input_channels=128,
        filter_size=3,
        num_filters=256,
        pooling=3
    )

    layer_flat, num_features = flatten_layer(layer_conv_5)

    layer_fc_1 = new_fc_layer(
        input=layer_flat, num_inputs=num_features, num_outputs=4096)

    layer_fc_1 = tf.nn.sigmoid(layer_fc_1)

    layer_fc_2 = new_fc_layer(
        input=layer_fc_1, num_inputs=4096, num_outputs=4096)

    layer_fc_2 = tf.nn.sigmoid(layer_fc_2)

    layer_output = new_fc_layer(
        input=layer_fc_2, num_inputs=4096, num_outputs=1)

    layer_output = tf.nn.sigmoid(layer_output)

    checkpoint_manager = CheckpointManager(NETWORK_NUMBER)

    init_g = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    with tf.Session() as sess:
        sess.run(init_g)
        sess.run(init_l)

        checkpoint_manager.restore_model(sess)

        imgs = []
        for file in os.listdir("./"):
            if file.endswith(".png"):
                path = os.path.join("./", file)
                im = Image.open(path)
                imgs.append(np.array(im).reshape(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNEL))

        print(layer_output.eval(feed_dict={input_placeholder: imgs},
                                session=sess))
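main() is likewise an excerpt: it relies on the TensorFlow 1.x API (tf.Session, tf.placeholder), on project-local helpers (new_conv_layer, flatten_layer, new_fc_layer, CheckpointManager), and on image-size constants defined elsewhere. Something along these lines would be needed at the top of the file; the constant values are only illustrative:

import os

import numpy as np
import tensorflow as tf
from PIL import Image

# Illustrative values; the real constants live elsewhere in the project.
IMAGE_WIDTH = 64
IMAGE_HEIGHT = 64
IMAGE_CHANNEL = 3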