Example #1
    def save_to_file(self):
        save_dir = os.path.join(self.saved_models_dir, self.name, self.tag,
                                str(self.training_steps))
        # Allow overwriting an existing save because we don't lose any data here
        tools.rmdir_if_exists(save_dir)
        os.makedirs(save_dir)

        file_path = os.path.join(save_dir, 'data')

        logging.info('Saving model to file %s' % file_path)

        extra = {
            'encoding_name': self.encoding_name,
            'encoding_args': self.encoding_args,
            'encoding_kwargs': self.encoding_kwargs,
            'state_sizes': self.state_sizes,
            'training_steps': self.training_steps,
            'round_steps': self.round_steps
        }
        extra_file_path = file_path + '.pkl'
        with open(extra_file_path, 'xb') as f:
            pickle.dump(extra, f)

        self.saver.save(self.sess, file_path)

        logging.info('Saved model to file %s' % file_path)
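save_to_file relies on a small tools.rmdir_if_exists helper that is not shown here. A minimal sketch of what it might look like (the project's real implementation may differ):

    import os
    import shutil

    def rmdir_if_exists(path):
        # Remove the directory tree at path; do nothing if it is absent.
        if os.path.isdir(path):
            shutil.rmtree(path)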
Example #2
    def init_from_file(self, from_name=None, training_steps=None):
        if from_name is None:
            from_name = self.name
        training_steps_dir = os.path.join(self.saved_models_dir, from_name,
                                          self.tag)
        if training_steps is None:
            training_steps = tools.get_latest_in_dir(training_steps_dir,
                                                     key=int)
        save_dir = os.path.join(training_steps_dir, str(training_steps))
        file_path = os.path.join(save_dir, 'data')

        logging.info('Loading model from file %s' % file_path)

        extra_file_path = file_path + '.pkl'
        with open(extra_file_path, 'rb') as f:
            extra = pickle.load(f)

        encoding_name = extra['encoding_name']
        encoding_args = extra['encoding_args']
        encoding_kwargs = extra['encoding_kwargs']
        self._setup_encoding(encoding_name, *encoding_args, **encoding_kwargs)

        self.state_sizes = extra['state_sizes']
        self.training_steps = extra.get('training_steps', int(training_steps))
        self.round_steps = extra.get('round_steps', 0)

        self._close_sess_if_open()
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf.Session(graph=self.graph)

            meta_file_path = file_path + '.meta'
            self.saver = tf.train.import_meta_graph(meta_file_path)

            self.saver.restore(self.sess, file_path)

            self.inputs = self.graph.get_tensor_by_name('inputs:0')
            self.labels = self.graph.get_tensor_by_name('labels:0')
            self.logits = self.graph.get_tensor_by_name('logits:0')
            self.losses = self.graph.get_tensor_by_name('losses:0')
            self.loss_mean = self.graph.get_tensor_by_name('loss_mean:0')
            self.loss_max = self.graph.get_tensor_by_name('loss_max:0')
            self.loss_min = self.graph.get_tensor_by_name('loss_min:0')
            self.loss_sum = self.graph.get_tensor_by_name('loss_sum:0')
            self.optimize = self.graph.get_operation_by_name('optimize')
            self.probabilities = self.graph.get_tensor_by_name(
                'probabilities:0')
            self.next_probabilities = tools.get_tensor_by_name_if_exists(
                self.graph, 'next_probabilities:0')

            # self.init_states = tuple(self.graph.get_tensor_by_name('init_state_' + str(i) + ':0')
            #         for i in range(len(self.state_sizes)))
            # self.output_states = tuple(self.graph.get_tensor_by_name('output_state_' + str(i) + ':0')
            #         for i in range(len(self.state_sizes)))

            self.summary = self.graph.get_tensor_by_name('summary:0')

            self.graph.finalize()

        logging.info('Loaded model from file %s' % file_path)
Example #3
def detect():
    screenshot = screen_shotter.ScreenShot()
    # Walk down the middle column of the screen, looking for the first
    # pixel whose color matches the fishing line.
    coord_fish_line = find_first_color(
        # TODO: the X of the column to scan is a rough guess.
        # We should try several columns, by bisection.
        screenshot, (900, 0), (0, +1),
        (0, 204, 255), "rgb", True)
    if coord_fish_line is None:
        info("Impossible de trouver la ligne de fishing")
        return None
    y_line = coord_fish_line[1]
    debug("y:" + str(y_line))

    # This will crash if no pixel in the row has the target color.
    # That is not supposed to happen, since we just found a matching
    # pixel in this very row.
    (x1_line, _) = find_first_color(
        screenshot, (0, y_line), (+1, 0),
        (0, 204, 255), "rgb", True)
    (x2_line, _) = find_first_color(
        screenshot, (screen_shotter.size_screen[0]-1, y_line), (-1, 0),
        (0, 204, 255), "rgb", True)
    debug("x1: " + str(x1_line) + " x2: " + str(x2_line))
    return (y_line, x1_line, x2_line)
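detect() leans on a find_first_color helper defined elsewhere. A minimal sketch of its likely shape, inferred from the calls above; the pixel accessor and the handling of the final boolean flag are assumptions here:

    def find_first_color(screenshot, start, step, color, mode="rgb", exact=True):
        # Walk from start in the direction given by step and return the
        # (x, y) of the first pixel matching color, or None if the walk
        # leaves the screen first.
        (x, y) = start
        (width, height) = screen_shotter.size_screen
        while 0 <= x < width and 0 <= y < height:
            if screenshot.get_pixel(x, y) == color:  # assumed accessor
                return (x, y)
            x += step[0]
            y += step[1]
        return None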
Example #4
 def __init__(self, max_batch_size, max_batch_width, pad_item, skip_padding=False):
     logging.info('Using skip padding %s' % skip_padding)
     self.inputs_array = np.zeros((max_batch_size, max_batch_width), dtype=np.int32)
     self.labels_array = np.zeros((max_batch_size, max_batch_width), dtype=np.int32)
     self.max_batch_size = max_batch_size
     self.max_batch_width = max_batch_width
     self.pad_item = pad_item
     self.skip_padding = skip_padding
Example #5
 def init_from_file_or_encoding(self, *args, **kwargs):
     logging.info('Initializing %s from file or encoding...' % self.name)
     try:
         self.init_from_file()
     except FileNotFoundError:
         logging.info(
             'File not found for %s, initializing from encoding...' %
             self.name)
         self.init_from_encoding(*args, **kwargs)
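Hypothetical usage of this fallback, assuming a model class exposing the method and a 'char' encoding name (both names are illustrative):

    model = CharRNN('shakespeare')
    # Loads the latest checkpoint if one exists; otherwise builds a
    # fresh graph from the named encoding.
    model.init_from_file_or_encoding('char')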
Example #6
    def test_logger(self):
        # ignore warnings
        warnings.simplefilter("ignore")

        # log something
        log_file = default_log_file + '_basic_test'
        my_logging.prepare_logger(log_file)
        my_logging.info("ABCD")
        my_logging.shutdown()

        # check logfile
        success = 'ABCD' in read_file(log_file + '_info.log')
        self.assertTrue(success)
Example #7
    def test_data(self):
        log_file = default_log_file + '_data_test'
        my_logging.prepare_logger(log_file)
        my_logging.data('key', 2)
        my_logging.info('ABCD')
        my_logging.shutdown()

        # check
        content = read_file(log_file + '_data.log')
        d = json.loads(content)
        self.assertEqual(d['key'], 'key')
        self.assertEqual(d['value'], 2)
        self.assertTrue('ABCD' not in content)
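The assertions above pin down the contract of my_logging.data: it writes a JSON record with 'key' and 'value' fields to the *_data.log file, kept separate from the *_info.log stream. A minimal sketch under those assumptions (the file-handler wiring done in prepare_logger is omitted):

    import json
    import logging

    _data_logger = logging.getLogger('data')

    def data(key, value):
        # One JSON record per call, routed to the *_data.log handler.
        _data_logger.info(json.dumps({'key': key, 'value': value}))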
Example #8
 def __init__(self,
              name,
              tag=None,
              saved_models_dir=None,
              saved_summaries_dir=None):
     self.uuid = uuid.uuid4()
     logging.info('Running __init__ %s' % self.uuid)
     self.name = name
     if tag is None:
         tag = config.TAG
     logging.info('Using tag %s and name %s' % (tag, name))
     if saved_models_dir is None:
         saved_models_dir = config.SAVED_MODELS_DIR
     if saved_summaries_dir is None:
         saved_summaries_dir = config.SAVED_SUMMARIES_DIR
     self.tag = tag
     self.saved_models_dir = saved_models_dir
     self.saved_summaries_dir = saved_summaries_dir
     self.training_steps = 0
     self.round_steps = 0
     self.sess = None
Example #9
 def sample(self, starting=None, max_num=50, context=200):
     """
     Args:
         starting: A thing that encodes to a python iterable of numbers
     """
     if starting is None:
         starting = self.encoding.empty()
     curr = list(self.encoding.encode_input_for_testing(starting))
     curr_output = list(curr)
     try:
         for i in range(max_num):
             with tools.DelayedKeyboardInterrupt():
                 # Sample using the last n chars where n = context
                 encoded_inpt = self.encoding.make_input_for_sample(
                     curr[-context:])
                 if self.next_probabilities is not None:
                     probs_batch = self._run(self.next_probabilities,
                                             [encoded_inpt])
                     probs = probs_batch[0]
                 else:
                     probs_batch = self._run(self.probabilities,
                                             [encoded_inpt])
                     probs = probs_batch[0][-1]
                 next_int = np.random.choice(self.effective_alphabet_size,
                                             1,
                                             p=probs).item()
                 curr_dec = self._decode_output_to_list(curr[-5:])
                 next_dec = self._decode_if_ok(next_int)
                 probs_dec = self._make_probs_dec(probs)
                 logging.info('Step %s: curr: %s next: %s probs: %s' %
                              (i, curr_dec, repr(next_dec), probs_dec))
                 if next_int == self.encoding.token_item:
                     break
                 if next_int < self.alphabet_size:
                     curr_output.append(next_int)
                 curr.append(next_int)
     except KeyboardInterrupt:
         logging.info('Cancelling sample...')
     return self.encoding.decode_output(curr_output)
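Hypothetical usage, assuming a character-level encoding has been set up:

    # Seed with a prompt and sample up to 200 further tokens, looking
    # back at most 100 tokens of context at each step.
    text = model.sample('ROMEO:', max_num=200, context=100)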
Example #10
def find(haystack,
         needle,
         all=False,
         first=False,
         ignore_case=True,
         max_results=50):
    '''Prints path to needle in iterable haystack (can be nested list or dict).

    Example:
        >>> haystack = {
            'fruits': [
                {'color': 'yellow', 'name': 'banana'},
                {'color': 'red', 'name': 'strawberry'},
                {'color': 'yellow', 'name': 'lemon'}],
            'vegetables': [
                {'color': 'green', 'name': 'green pepper'}
        ]}

        >>> find(haystack, 'pepper')
        INFO: Searching...
        haystack['vegetables'][0]['name']: green pepper
        INFO: Found 1 result

        >>> haystack['vegetables'][0]['name']
        'green pepper'

        >>> find(haystack, 'cucumber')
        INFO: Searching...
        INFO: Not found

        >>> find(haystack, 'yellow')
        INFO: Searching...
        haystack['fruits'][0]['color']: yellow
        haystack['fruits'][2]['color']: yellow
        INFO: Found 2 results

        # Case-sensitive
        >>> find(haystack, 'YeLLow', ignore_case=False)
        INFO: Searching...
        INFO: Not found

    Args:
        haystack (dict or list): Nested dict or list.
        needle (anything): Target to find.
        all (bool): Shows all results if True. Up to max_results otherwise.
        first (bool): Returns first result if True. All results otherwise.
        ignore_case (bool): Case-insensitive if True.
        max_results (int): Max number of results to return. Unlimited if negative or zero.
    '''
    info('Searching...')
    all_ = all
    results = _find(haystack, needle, first, ignore_case)
    if not results:
        info('Not found')
        return
    for index, result in enumerate(results):
        if not all_ and index >= max_results > 0:
            # Max number of results reached.
            print('Displayed first %i results...' % max_results)
            break

        # Print path to value and value itself.
        path = ''
        for attr in result:
            if _is_text(attr):
                path += "['%s']" % attr
            else:
                path += '[%s]' % attr
        print('%s%s:' % (var_name(haystack), path), end=' ')
        print(eval('haystack%s' % path))
    num_results = len(results)
    if not first:
        info('Found %s result%s' %
             (human_int(num_results), 's' if num_results > 1 else ''))
Example #11
 def handler(self, sig, frame):
     self.signal_received = (sig, frame)
     logging.info('KeyboardInterrupt received.')
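This handler is one piece of tools.DelayedKeyboardInterrupt, the context manager used throughout train() and sample(). A common shape for the full class, sketched here rather than quoted from the project, defers SIGINT until the critical section has finished:

    import signal

    import my_logging as logging

    class DelayedKeyboardInterrupt:
        def __enter__(self):
            self.signal_received = None
            self.old_handler = signal.signal(signal.SIGINT, self.handler)

        def handler(self, sig, frame):
            # Remember the interrupt instead of raising immediately.
            self.signal_received = (sig, frame)
            logging.info('KeyboardInterrupt received.')

        def __exit__(self, exc_type, exc_value, traceback):
            signal.signal(signal.SIGINT, self.old_handler)
            if self.signal_received is not None:
                # Re-deliver the deferred interrupt.
                self.old_handler(*self.signal_received)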
Example #12
import my_logging as logging

logging.info('Importing tensorflow...')
import tensorflow as tf
logging.info('Imported tensorflow.')
tf.get_default_graph().finalize()

import numpy as np
from datetime import datetime
import uuid
import os
import json
import pickle
import itertools
from abc import ABC, abstractmethod

import config

import tools
"""
Terminology:
    input/inpt: A single string or list of numbers
    inputs: An iterable where each element is an input
    exp_output/exp_outpt: A list of numbers
    exp_outputs: An iterable where each element is an exp_output

Directory structure:
    saved_models/
        shakespeare/
            baseline/
                3000/
"""
Example #13
# -*- coding: utf-8 -*-

from __future__ import unicode_literals
import time
from my_logging import debug, info

import screen_shotter
import fish_line_detector
from fish_line_analyzer import FishLineAnalyzer, fst
from fish_subline_analyzer import FishSublineAnalyzer
from key_presser import KeyPresser

debug("coucou éé")
info("pouet")

DICT_DELAY_FROM_FISHING_STATE = {
    fst.HIGHLIGHTED : 0.5,
    fst.SIGNAL_SENT : 0.1,
    fst.WAIT_FISH : 0.1,
    fst.FISH_NEAR : 0.000001,
    fst.SEND_SIGNAL : 0.000001,
    fst.CRITICAL_ZONE_NOT_FOUND : 0.1,
}

fish_line_info = fish_line_detector.detect()
if fish_line_info is None:
    raise Exception("impossible de trouver la ligne de fishing à l'écran")

(y_line, x1_line, x2_line) = fish_line_info
fish_subline_analyzer = FishSublineAnalyzer(y_line+5, x1_line, x2_line)
fish_line_analyzer = FishLineAnalyzer(
Example #14
    def train(self,
              inputs,
              autosave=None,
              cont=False,
              count=False,
              save_graph=True):
        """
        Args:
            inputs: A python iterable of things that encode to python iterables (if long) or lists (if short) of numbers
        """
        if count:
            logging.info(
                'Counting enabled! This should only be used if inputs can be iterated over multiple times.'
            )
            logging.info('Counting number of steps...')
            # Only use count if inputs and the items in it can be iterated over multiple times
            total_round_steps = self.encode_and_count_batches_for_training(
                inputs)
            logging.info('Number of steps is %s' % total_round_steps)

        batches = self.encode_and_make_batches_for_training(inputs)
        if cont:
            logging.info('Continuing enabled! Starting from round %s' %
                         self.round_steps)
            # Note: round_steps is the number of rounds completed, so
            # is also the index of the next round to be done
            batches = itertools.islice(batches, self.round_steps, None)
        else:
            self.round_steps = 0

        save_dir = os.path.join(self.saved_summaries_dir, self.name, self.tag)
        os.makedirs(save_dir, exist_ok=True)

        summaries_dir = save_dir
        logging.info('Using summaries dir %s' % summaries_dir)
        if save_graph and self.training_steps == 0:
            logging.info('Saving graph...')
            # Only write graph during the first training attempt
            summary_writer = tf.summary.FileWriter(summaries_dir, self.graph)
        else:
            summary_writer = tf.summary.FileWriter(summaries_dir)

        logging.info('Starting training...')
        curr_states = None
        starting_i = self.training_steps
        starting_round = self.round_steps
        # Set i here in case continuing leaves no batches to iterate over
        i = starting_i
        try:
            for batch in batches:
                with tools.DelayedKeyboardInterrupt():
                    # Update the counters inside the
                    # DelayedKeyboardInterrupt section to avoid the
                    # unlikely race of an interrupt landing after the
                    # counters are updated but before the protected
                    # section is entered.
                    self.training_steps += 1
                    self.round_steps += 1
                    i = self.training_steps
                    # i, training_steps will be steps completed after the run_batch
                    if i - 1 == starting_i:
                        loss_max, loss_mean, loss_min = self._run_batch(
                            [self.loss_max, self.loss_mean, self.loss_min],
                            batch, curr_states)
                        logging.info(
                            'Starting values: steps: %s round: %s loss max: %s mean: %s min: %s'
                            % (starting_i, starting_round, loss_max, loss_mean,
                               loss_min))
                        if count:
                            time_rem = tools.TimeRemaining(
                                total_round_steps, starting_round)
                    if i % 10 != 0:
                        _ = self._run_batch([self.optimize], batch,
                                            curr_states)
                    else:
                        _, summary, loss_max, loss_mean, loss_min = self._run_batch(
                            [
                                self.optimize, self.summary, self.loss_max,
                                self.loss_mean, self.loss_min
                            ], batch, curr_states)
                        summary_writer.add_summary(summary, i)
                        # Note: The printed numbers are the numbers from before the optimization update happens
                        # Note: Step numbers start from 1
                        if count:
                            time_rem_str = time_rem.get_str(self.round_steps)
                            logging.info(
                                'Step %s round %s/%s: loss max: %s mean: %s min: %s time rem: %s'
                                %
                                (i, self.round_steps, total_round_steps,
                                 loss_max, loss_mean, loss_min, time_rem_str))
                        else:
                            logging.info(
                                'Step %s round %s: loss max: %s mean: %s min: %s'
                                % (i, self.round_steps, loss_max, loss_mean,
                                   loss_min))
                        #losses, probabilities = self.sess.run([self.losses, self.probabilities], feed_dict={self.inputs: batch})
                        #logging.info('Step %s: losses: %s probs: %s' % (i, losses, probabilities))
                    # curr_states = new_states
                    if autosave is not None and (autosave is not True
                                                 and i % autosave == 0):
                        self.save_to_file()
        except KeyboardInterrupt:
            logging.info('Cancelling training...')
            logging.info('Saved summaries to %s' % summaries_dir)
            if autosave is not None and (autosave is True
                                         or i % autosave != 0):
                # Save the last one only if it hasn't already been saved
                self.save_to_file()
            tools.exit(22)
        else:
            self.round_steps = 0
            logging.info('Saved summaries to %s' % summaries_dir)
            if autosave is not None:
                # Always save the last one because round_steps has changed
                self.save_to_file()
            logging.info('Done training.')
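Hypothetical usage, assuming corpus is an iterable that can be traversed more than once (which count=True requires):

    # Save a checkpoint every 1000 steps and resume from the last
    # completed round; autosave=True would instead save only when
    # training finishes or is interrupted.
    model.train(corpus, autosave=1000, cont=True, count=True)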
Example #15
 def __del__(self):
     logging.info('Running __del__ %s' % self.uuid)
     self._close_sess_if_open()
Example #16
    def init_from_encoding(self, encoding_name, *args, **kwargs):
        logging.info('Initializing from encoding...')
        self._setup_encoding(encoding_name, *args, **kwargs)

        self._close_sess_if_open()
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.inputs = tf.placeholder(tf.int32,
                                         shape=(None, None),
                                         name='inputs')
            self.labels = tf.placeholder(tf.int32,
                                         shape=(None, None),
                                         name='labels')
            # inputs is a list of batches where each batch is a list of
            # integers in [0, alphabet_size]
            # inputs is a tensor with dim batch_size x timesteps

            self.inputs_one_hot = tf.one_hot(self.inputs,
                                             self.effective_alphabet_size,
                                             name='inputs_one_hot')
            # inputs_one_hot is a list of batches where each batch is a list of one hot encoded lists
            # inputs_one_hot is a tensor with dim batch_size x (timesteps+1) x (alphabet_size+1)

            self.state_sizes = [4 * self.effective_alphabet_size] * 2

            # def make_init_state(i, state_size):
            #     return tf.placeholder(tf.float32, [2, None, state_size], name='init_state_' + str(i))

            # def make_lstm_state_tuple(init_state):
            #     c, h = tf.unstack(init_state)
            #     return tf.contrib.rnn.LSTMStateTuple(c, h)

            # self.init_states = tuple(make_init_state(i, state_size) for i, state_size in enumerate(self.state_sizes))
            # rnn_init_state = tuple(make_lstm_state_tuple(init_state) for init_state in self.init_states)

            lstm_cells = list(
                map(tf.contrib.rnn.BasicLSTMCell, self.state_sizes))
            lstm = tf.contrib.rnn.MultiRNNCell(lstm_cells)
            rnn_output, rnn_states = tf.nn.dynamic_rnn(lstm,
                                                       self.inputs_one_hot,
                                                       dtype=tf.float32)
            tf.summary.histogram('rnn_output', rnn_output)
            # rnn_output is a tensor with dim batch_size x timesteps x state_size
            # rnn_states is a list of state tuples with dim batch_size x state_size

            # def make_output_state(i, state_tuple):
            #     c = state_tuple.c
            #     h = state_tuple.h
            #     return tf.identity(tf.stack([c, h]), name='output_state_' + str(i))

            # self.output_states = tuple(make_output_state(i, state) for i, state in enumerate(rnn_states))

            logits = tf.contrib.layers.fully_connected(
                rnn_output, self.effective_alphabet_size, activation_fn=None)
            self.logits = tf.identity(logits, name='logits')
            tf.summary.histogram('logits', self.logits)

            losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=self.labels, logits=self.logits)
            self.losses = tf.identity(losses, name='losses')
            tf.summary.histogram('losses', self.losses)
            # losses is a tensor with dim batch_size x (timesteps+1)

            self.loss_mean = tf.reduce_mean(self.losses, name='loss_mean')
            tf.summary.scalar('loss_mean', self.loss_mean)
            self.loss_max = tf.reduce_max(self.losses, name='loss_max')
            tf.summary.scalar('loss_max', self.loss_max)
            self.loss_min = tf.reduce_min(self.losses, name='loss_min')
            tf.summary.scalar('loss_min', self.loss_min)
            self.loss_sum = tf.reduce_sum(self.losses, name='loss_sum')
            tf.summary.scalar('loss_sum', self.loss_sum)
            # loss_* is a tensor with a single float

            self.optimize = tf.train.AdamOptimizer().minimize(self.loss_mean,
                                                              name='optimize')
            # self.optimize = tf.train.AdadeltaOptimizer(0.03).minimize(self.loss_mean, name='optimize')

            self.probabilities = tf.identity(tf.nn.softmax(self.logits),
                                             name='probabilities')
            tf.summary.histogram('probabilities', self.probabilities)

            self.next_probabilities = tf.identity(self.probabilities[:, -1],
                                                  name='next_probabilities')

            self.saver = tf.train.Saver()

            self.summary = tf.identity(tf.summary.merge_all(), name='summary')

            self.glob_var_init = tf.global_variables_initializer()

            self.graph.finalize()

            self.sess = tf.Session(graph=self.graph)
            self.sess.run(self.glob_var_init)

        logging.info('Initialized from encoding.')
Example #17
 def _close_sess_if_open(self):
     if self.sess is not None:
         self.sess.close()
         self.sess = None
         logging.info('Closed session %s' % self.uuid)