Example #1
 def load_model_params(self):
     """Load saved tensors and ops from the default graph's named collections."""
     log.info("Loading Model Params")
     # lightweight namespace to hang the collected tensors/ops on
     class params(object): pass
     params.list_all_ops = [n.name for n in tf.get_default_graph().as_graph_def().node]
     log.debug("Num ops in model: {}".format(len(params.list_all_ops)))
     params.final_layer = tf.get_collection_ref('final_layer')[0]
     #log.debug("Found Final Layer: {}".format(params.final_layer))
     params.input_word = tf.get_collection_ref('input_word')[0]
     #log.debug("Found input tensor: {}".format(params.input_word))
     params.input_label = tf.get_collection_ref('input_label')[0]
     #log.debug("Found input label: {}".format(params.input_label))
     params.global_step = tf.get_collection_ref('global_step')[0]
     #log.debug("Found global_step: {}".format(params.global_step))
     params.learn_rate = tf.get_collection_ref('learn_rate')[0]
     #log.debug("Found learn_rate: {}".format(params.learn_rate))
     params.correct_pred = tf.get_collection_ref('correct_pred')[0]
     #log.debug("Found correct_pred op: {}".format(params.correct_pred))
     params.accuracy = tf.get_collection_ref('accuracy')[0]
     #log.debug("Found accuracy op: {}".format(params.accuracy))
     params.cost = tf.get_collection_ref('cost')[0]
     #log.debug("Found cost op: {}".format(params.cost))
     params.optimizer = tf.get_collection_ref('optimizer')[0]
     #log.debug("Found optimizer op: {}".format(params.optimizer))
     params.init_op = tf.get_collection_ref('init_op')[0]
     # log.debug("Found init_op op: {}".format(params.init_op))
     # params.saver = tf.get_collection_ref('saver')[0]
     # log.debug("Found saver op: {}".format(params.saver))
     params.merged = tf.get_collection_ref('merged')[0]
     # log.debug("Found merged op: {}".format(params.merged))
     # params.config = tf.get_collection_ref('config')[0]
     params.test = "okay"
     self.params = params
     return params
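For context, a minimal sketch of the build-time counterpart that load_model_params() assumes: each tensor or op must have been registered under the same collection name when the graph was constructed (TensorFlow 1.x API; the function name and argument list here are illustrative, not taken from the original project):

import tensorflow as tf

def register_model_params(final_layer, input_word, input_label, global_step,
                          learn_rate, correct_pred, accuracy, cost,
                          optimizer, init_op, merged):
    """Register graph endpoints so load_model_params() can find them later."""
    # each name below matches a tf.get_collection_ref() call above
    tf.add_to_collection('final_layer', final_layer)
    tf.add_to_collection('input_word', input_word)
    tf.add_to_collection('input_label', input_label)
    tf.add_to_collection('global_step', global_step)
    tf.add_to_collection('learn_rate', learn_rate)
    tf.add_to_collection('correct_pred', correct_pred)
    tf.add_to_collection('accuracy', accuracy)
    tf.add_to_collection('cost', cost)
    tf.add_to_collection('optimizer', optimizer)
    tf.add_to_collection('init_op', init_op)
    tf.add_to_collection('merged', merged)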
Example #2
    def read_data(fname):
        """Create numpy representation of text from path."""
        log.info("Processing text at path: {}".format(fname))
        if not os.path.isfile(fname):
            log.warn("{} is an invalid path".format(fname))
            return False

        class sample_text:
            pass

        with open(fname) as f:
            content = f.readlines()
        content = [x.strip() for x in content]
        content = [line.split() for line in content]
        content = np.array(content)
        sample_text.content = np.reshape(content, [
            -1,
        ])
        sample_text.len = sample_text.content.shape[0]
        sample_text.sample = sample_text.content[np.random.randint(
            0, sample_text.len)]
        # this should be red if lower than x and green if above y.
        log.debug("Sample text is {} words long.".format(sample_text.len))
        log.info("Sample word from text:\n\t{}".format(sample_text.sample))
        log.info("File loaded successfully.")
        return sample_text
Example #3
 def sanity(self):
     """Round-trip a key/value pair through the database."""
     log.info("Starting Sanity Check")
     key = "stuff"
     value = "morestuff"
     self.idx2word.write_data(key, value)
     new_value = self.idx2word.read_data(key)
     assert value == new_value
     log.debug("Passed Sanity Check")
     return True
Example #4
    def read_data(self, fname=None, normalize_digits=True):
        """Create numpy representation of text from path."""
        if fname is None:
            fname = "text/test.txt"
        log.info("Processing text at path: {}".format(fname))
        if not os.path.isfile(fname):
            log.warn("{} is an invalid path".format(fname))
            return False

        class sample_text:
            pass

        # starting
        vocab = {}
        with open(fname) as f:
            counter = 0
            for line in f:
                counter += 1
                if counter % 100000 == 0:
                    print("Processing line #{}...".format(counter))
                # print(line)
                tokens = self.basic_tokenizer(line)
                for w in tokens:
                    word = re.sub(self._DIGIT_RE, "0",
                                  w) if normalize_digits else w
                    if word in vocab:
                        vocab[word] += 1
                    else:
                        vocab[word] = 1
        # finishing
        vocab_list = self._START_VOCAB + sorted(
            vocab, key=vocab.get, reverse=True)
        log.info('>> Full Vocabulary Size : {}'.format(len(vocab_list)))

        # add words to database
        for index, word in enumerate(vocab_list):
            log.debug("adding word \"{}\" to database @ {}".format(
                word, index))
            self.idx2word.write_data(str(index), str(word))
            self.word2idx.write_data(str(word), str(index))
            # how much time does this add???
            read_back = int(self.word2idx.read_data(str(word)))
            assert index == read_back

        # sanity check
        if False:
            encoded_sample = self.encode_line(line)
            print("Sample Encoded Line:\n{} == {}".format(
                line, encoded_sample))
            decoded_sample = self.decode_line(encoded_sample)
            print("Sample Decoded line: {}".format(decoded_sample))

        # fin
        log.info("File loaded successfully.")
        return True
Example #5
def run(tags, min_payout=Decimal('0.50')):
    log.info("Curate mode activated", tags=tags)

    if tags is None or len(tags) < 1:
        raise ValueError("You must specify at least one tag")

    log.debug("initializing...")
    steem = Steem(keys=[account.key])
    chain = Blockchain(steem)
    curation = Curation(chain, min_payout)
    log.debug("ready", steem=steem, blockchain=chain, curator=curation)

    curation.watch(tags)
Example #6
 def build_dataset(self, sample_set):
     """Build word-to-index and index-to-word dictionaries from the sample."""
     sample_set.count = collections.Counter(sample_set.content).most_common()
     sample_set.dictionary = dict()
     log.debug("adding word at pos. word[pos]")
     for word, _ in sample_set.count:
         cur_len = len(sample_set.dictionary)
         #log.debug("{} [{}]".format(word, cur_len))
         sample_set.dictionary[word] = cur_len
     # build the reverse map once, after the dictionary is complete
     sample_set.reverse_dictionary = dict(zip(sample_set.dictionary.values(),
                                              sample_set.dictionary.keys()))
     sample_set.dict_len = len(sample_set.dictionary)
     log.debug("len of dictionary {}".format(sample_set.dict_len))
     return sample_set
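The same dictionary-building idea in standalone form, as a quick sketch (the tiny corpus is made up; it mirrors the loop above, with the reverse map built once at the end):

import collections

content = "the quick brown fox jumps over the lazy dog the fox".split()

count = collections.Counter(content).most_common()
dictionary = {}
for word, _ in count:
    # more frequent words get lower indices
    dictionary[word] = len(dictionary)
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))

assert reverse_dictionary[dictionary['fox']] == 'fox'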
Example #7
    def get_text_file(self, file_, trunk=True):
        """Load a text file and build token/vector structures from it."""
        if not os.path.isfile(file_):
            log.warn("{} is an invalid path".format(file_))
            return False

        class sample_text():
            pass

        msg = "Text Results:\n"
        with open(file_) as f:
            content = f.readlines()
        sample_text.all_content = content
        content = [x.strip() for x in content]
        print(len(content))
        content = [line.split() for line in content]
        content = np.array(content)
        # print(content)
        sample_text.content = np.reshape(content, [
            -1,
        ])
        print(content.shape[:])
        sample_text.nwords = 0
        sample_text.word_set = []
        sample_text.token_to_vector = {}

        for this_line in sample_text.all_content:
            this_line = this_line.strip()
            words_in_line = this_line.split(' ')
            # TOKEN is the first word in the line
            token = words_in_line[0]
            # VECTOR is the rest of the line, relative to the token
            vector = words_in_line[1:]  # this line minus the token
            # one hot encoded...
            sample_text.token_to_vector[token] = vector
            for word in words_in_line:
                sample_text.word_set.append(word)
                sample_text.nwords += 1

        del sample_text.all_content  # maybe ... save on some rams
        msg += "Num Words: {}\n".format(sample_text.nwords)
        sample_text.uwords = sorted(list(set(sample_text.word_set)))
        msg += "Num Unique Words: {}\n".format(len(sample_text.uwords))
        msg += "Num of Sentences or Unique Vectors: {}\n".format(
            len(sample_text.token_to_vector))

        log.debug(msg)
        return sample_text
Example #8
def run(tags):
    log.info("Follow mode activated", tags=tags)

    if tags is None or len(tags) < 1:
        raise ValueError("You must specify at least one tag")

    log.debug("initializing...")
    steem = Steem(keys=[cred.key])
    account = Account(cred.id, steem)
    chain = Blockchain(steem)
    log.debug("ready", steem=steem, account=account, blockchain=chain)

    log.info("Gathering our following list...")
    following = account.get_following()
    pending = []
    log.info("Following list retrieved", count=len(following))

    log.info("Watching for new posts...")
    while True:
        stream = map(Post, chain.stream(filter_by=['comment']))

        try:
            for post in stream:
                count = len(pending)
                if count > 0:
                    copy = list(pending)
                    for i in range(count):
                        if have_bandwidth(steem, account):
                            user = copy[i]
                            log.info("following user", user=user)
                            steem.follow(user, account=cred.id)
                            del pending[0]

                        else:
                            log.warn("Waiting for more bandwidth before following another user")
                            break

                if post.is_main_post():
                    log.debug("found a top-level post", author=post.author, tags=post.tags)

                    if post.author != cred.id:
                        for tag in tags:
                            if tag in post.tags:
                                if post.author not in following:
                                    pending.append(post.author)
                                    following.append(post.author)
                                    break

        except PostDoesNotExist as e:
            log.debug("Post has vanished", exception=e)

        except RPCError as e:
            log.error("RPC problem while streaming posts", exception=e)
Example #9
 def __init__(self, options):
     """Gonna need a db, and some creds."""
     log.info("Starting AG Chatter Bot.")
     self.options = options
     # Build Constructors
     self.idx2word = Database(
         host=options.redis_host, pass_=options.redis_pass, db=0)
     self.word2idx = Database(
         host=options.redis_host, pass_=options.redis_pass, db=1)
     self.dataReader = DataReader(
         self.options, self.idx2word, self.word2idx)
     self.model = Model(self.options)
     log.debug(options)
     log.info("Init complete.")
Example #10
def talk():
    text = request.args.get('text')

    if text is None or text == '':
        text = request.get_data().decode('us-ascii')

    #log.debug("you said: ", text)
    log.debug("talk", text=text, chatservice=chatservice)

    if chatservice is not None:
        try:
            response = chatservice.talk(text)
            log.info("server says", response=response)
            return response
        except Exception as e:
            log.error("chat service failure", exception=e)
            return "oops, malfunction"
    else:
        return "FlaskServer is working and your message received, but no chatbot service was provided"
Example #11
def run(args):
    log.info("Market summary mode activated", args=args)

    if args is None or len(args) < 3:
        raise ValueError(
            "You must specify a currency pair, title, and one or more tags")

    pair = args[0]
    title = args[1]
    tags = args[2:]

    log.debug("initializing...")
    steem = Steem(keys=[account.key])
    commit = Commit(steem)
    api = Poloniex()
    market = Market(commit, api, pair)
    log.debug("ready", steem=steem, commit=commit, api=api, market=market)

    market.summarize(title, tags)
Example #12
    def process(self):
        try_again = {}

        local_max_payout = Decimal("0")
        local_max_post = None

        for key, post in self.posts.items():
            try:
                now = datetime.utcnow()

                if now - post['created'] >= timedelta(minutes=27):
                    if now - post['created'] < timedelta(minutes=30):
                        post.refresh()
                        payout = Decimal(
                            post.get("pending_payout_value").amount)

                        if payout >= self.min_payout and payout > local_max_payout:
                            local_max_payout = payout
                            local_max_post = post

                else:  # post is not mature enough yet, check it again later
                    try_again[post.identifier] = post

            except PostDoesNotExist as e:
                log.debug("Post has vanished", exception=e)

            except RPCError as e:
                log.error("RPC problem while refreshing post", exception=e)

        self.posts = try_again

        if local_max_post is not None and local_max_payout > self.max_payout:
            log.info("Upvoting post #{}".format(self.votes_today + 1),
                     post=local_max_post,
                     elapsed=local_max_post.time_elapsed(),
                     payout=local_max_payout)
            local_max_post.upvote(voter=account.id)

            self.max_payout = local_max_payout
            self.votes_today += 1

            if self.first_vote is None:
                self.first_vote = datetime.utcnow()
Example #13
 def load_tf_model(self, folder=None):
     """Restore the latest model checkpoint from the given folder."""
     if folder is None: folder = self.logs_path
     log.info("Loading Model: {}".format("Model_Name"))
     if self.sess:
         self.sess.close()
     try:
         self.sess = tf.InteractiveSession()
         checkpoint_file = tf.train.latest_checkpoint(folder)
         log.info("trying: {}".format(folder))
         saver = tf.train.import_meta_graph(checkpoint_file + ".meta")
         log.debug("loading modelfile {}".format(checkpoint_file))
         self.sess.run(tf.global_variables_initializer())
         saver.restore(self.sess, checkpoint_file)
         log.info("Model successfully loaded: {}".format(checkpoint_file))
         self.saver = saver
         self.model_loaded = True
     except Exception as e:
         log.warn("This folder failed to produce a model {}\n{}".format(folder, e))
         return False
     return True
Example #14
def have_bandwidth(steem, account):
    '''
        Determine if the given account has enough bandwidth to do stuff.
        Note that this is just an estimate to return approximately >20% bandwidth remaining.
    '''
    # FIXME: rewrite this once we get a better calculation for determining available bandwidth

    props = steem.get_dynamic_global_properties()
    log.debug("got global properties", props=props)

    total_vests = Amount(props['total_vesting_shares']).amount
    max_bandwidth = int(props['max_virtual_bandwidth'])

    vests = Amount(account['vesting_shares']).amount
    bw_post = steem.get_account_bandwidth(account.name, "post")
    bw_forum = steem.get_account_bandwidth(account.name, "forum")
    bw_market = steem.get_account_bandwidth(account.name, "market")
    log.debug("account bandwidth information",
              vests=vests,
              post=bw_post,
              forum=bw_forum,
              market=bw_market)

    bw_allocated = vests / total_vests
    bw_used = (int(bw_market['average_bandwidth']) +
               int(bw_forum['average_bandwidth'])) / max_bandwidth
    ratio = bw_used / bw_allocated
    log.debug("bandwidth calculation",
              allocated=bw_allocated,
              used=bw_used,
              ratio=ratio)

    return ratio < 9
Example #15
    def main(self, args):
        """Command-line entry point for the chatbot network."""
        log.info("Beginning command-line run of the chatbot network.")
        # get a text file... say lincoln.txt
        try:
            file_ = args[1]
            log.info("Receiving Documents: {}".format(file_))
        except IndexError:
            file_ = None
        if file_ is None:
            # file_ = "../text/sample.txt"
            file_ = "text/lincoln.txt"
            log.debug("Going with sample: {}".format(file_))
        # Get some data
        log.info("Opening File: {}".format(file_))
        sample_set = self.read_data(file_)
        if not sample_set:
            return False
        # clean your data
        log.info("Building Database Dictionary")
        sample_set = self.build_redis_dataset(sample_set)
        if not sample_set:
            return False
        """
        # build a tensorboard
        log.info("build tensorflow network")
        log.debug("Trying to Load Old Model")
        if self.load_tf_model(self.logs_path):
            network = self.load_model_params()
        else:
            log.debug("Creating a New Model")
            network = self.build_network(sample_set)
        log.debug("Working with Final Layer {}".format(network.final_layer))

        # do some work
        msg = "Train Iters: {}".format(self.train_iters)
        log.info("Training Details:\n{}".format(msg))
        final_loss, average_acc = self.process_network(sample_set, network)
        """
        return True
Example #16
    def watch(self, tags):
        log.info("Watching for new posts...")
        while True:
            stream = map(Post, self.chain.stream(filter_by=['comment']))

            try:
                for post in stream:
                    self.process()

                    if self.first_vote is not None:
                        elapsed = datetime.utcnow() - self.first_vote

                        if self.votes_today > 11 or elapsed >= timedelta(
                                hours=24):
                            wait = timedelta(hours=24) - elapsed

                            if wait.total_seconds() > 0:
                                log.info(
                                    "Maximum votes reached for today, going to sleep now",
                                    wait=wait)
                                sleep(wait.total_seconds())

                            log.info("New day!")
                            self.first_vote = None
                            self.votes_today = 0
                            self.max_payout = Decimal("0")

                            break

                    if post.is_main_post():
                        log.debug(
                            "found a top-level post",
                            post=post,
                            elapsed=post.time_elapsed(),
                            tags=post.tags,
                            total_payout=post.get("total_payout_value"),
                            pending_payout=post.get("pending_payout_value"))

                        for tag in tags:
                            if tag in post.tags:
                                log.debug(
                                    "found a possible curation candidate",
                                    post=post)
                                self.posts[post.identifier] = post
                                break

            except PostDoesNotExist as e:
                log.debug("Post has vanished", exception=e)

            except RPCError as e:
                log.error("RPC problem while streaming posts", exception=e)
Example #17
 def main(self):
     if self.database:
         log.debug("found that database")
         if self.write_data('4', '20'):
             log.debug("wrote that data")
         twenty = self.read_data('4')
         log.debug("read data test: {}".format(twenty))
         if int(twenty) == 20:
             return True
         log.warn("read-back value did not match what was written")
         return False
     else:
         log.warn("Not logging into the database.")
     return False
Example #18
    def main(self):
        """Test of connection settings."""
        if self.database:
            log.debug("found that database")
            if self.write_data('4', '20'):
                log.debug("wrote that data")
            twenty = self.read_data('4')
            log.debug("read data test: {}, type: {}".format(
                twenty, type(twenty)))
            # Compare by value, not identity.
            if int(float(twenty)) == 20:
                return True

            log.warn("read-back value did not match what was written")
            return False
        else:
            log.warn("Not logging into the database.")
        return False
Example #19
    def build_redis_dataset(self, sample_set):
        """Use redis for managing a dynamic words library."""
        log.info("Accessing redis for text management.")
        start_time = time.time()
        sample_set.count = collections.Counter(
            sample_set.content).most_common()
        sample_set.dict = dict()
        sample_set.rev_dict = dict()
        sample_set.dict['UNK'] = 0
        sample_set.rev_dict[0] = 'UNK'
        sample_set.num_unk = 0
        # map each word in the content to its position, for UNK replacement
        unk_replacer = {
            '{}'.format(y): '{}'.format(x)
            for x, y in enumerate(sample_set.content)
        }
        sample_set.unique_words = len(sample_set.count)
        for index, (word, word_instances) in enumerate(sample_set.count):
            mesg = "Popularity Rank: {}, Word: {}: Num References: {}".format(
                index + 1, word, word_instances)
            # the plan! Stop after 10k words. After that,
            # replace the words in the input text as UNK.
            if index <= self.vocab_size:
                # add each entry to the dict
                sample_set.dict[word] = index
                sample_set.rev_dict[index] = word
            else:
                # This takes time...
                loop_start = time.time()
                print("##################################")
                print("- Looking for {} instances of {}".format(
                    word_instances, word))
                for j in range(word_instances):
                    word_place = unk_replacer[word]
                    sample_set.num_unk += 1
                    print("- Place in data to replace a word: {}".format(
                        word_place))
                    print("- is {} this {}".format(
                        word, sample_set.content[int(word_place)]))
                    if word in sample_set.content[int(word_place)]:
                        print("-- Yes.")
                        sample_set.content[int(word_place)] = 'UNK'
                        print('-# Changed to UNK')
                        # update the unk_repacler
                        if j + 1 < word_instances:
                            print(
                                "-! Checking for other instances of word: {}.".
                                format(word))
                            unk_replacer = {
                                '{}'.format(y): '{}'.format(x)
                                for x, y in enumerate(sample_set.content)
                            }
                    else:
                        print("-! Bogus word. {}".format(word))

                loop_end = time.time()
                # if index % 100 == 0:
                elap = loop_end - loop_start
                left = sample_set.unique_words - (sample_set.num_unk +
                                                  self.vocab_size)
                print("Word took {} to fix.".format(elapsed(elap)))
                print("######|| Have {} left to fix. should take {} ||######".
                      format(left, elapsed(elap * left)))

                # lookup word in input and replace it with 'UNK'
            #    for i_content, content_word in enumerate(sample_set.content):
            #        if word in content_word:
            #            sample_set.content[i_content] = 'UNK'
            #            print("setting {} as UNK, num_unk: {}".format(word, sample_set.num_unk))
            #            sample_set.num_unk += 1
            # do redis next...
        end_time = time.time()
        print("process took {}secs to complete.".format(
            elapsed(end_time - start_time)))
        print("sample_set.num_unk ", sample_set.num_unk)
        log.debug("Recounting Words in dataset: {}".format(len(
            sample_set.dict)))
        log.info("Finished Creating Dictionaries from texts.")
        """
        try:
            word = float(word)
            word_ = self.p.number_to_words(int(word))
            sample_set.num_converted += 1
            sample_set.converted.append((word, word_))
            word = word_
        except:
            pass

        # FIX ME... SEARCH FOR OLD REFERENCE FIRST!
        # this is broken
        if self.database.read_data(word) is cur_len:
            pass
        else:
            self.database.write_data(str(word), int(cur_len))
            self.rev_dict.write_data(int(cur_len), str(word))
            sample_set.num_to_dict += 1
        #self.database.set_wordposition(str(word), int(cur_len))
        sample_set.dict_len += 1
        """
        # log.debug("len of dictionary {}".format(sample_set.dict_len))
        # log.debug("Num Converted words {}".format(sample_set.num_converted))
        # log.debug("Num  words added to database {}".format(sample_set.num_to_dict))
        # print(sample_set.converted)
        return sample_set
Example #20
 def read_data(self, key):
     value = self.database.get(key)
     if value is not None:
         value = value.decode('UTF-8')
     log.debug("read: {}, found: {}".format(key, value))
     return value
Example #21
 def write_data(self, key, value):
     self.database.set(key, value)
     log.debug("write: [{}, {}]".format(key, value))
     return True
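The read_data/write_data wrappers above imply a thin class around a redis connection. A minimal sketch assuming the redis-py client; the constructor arguments mirror Example #9, but the internals here are a guess, not the original implementation:

import redis

import ag.logging as log


class Database(object):
    """Thin key/value wrapper over redis (illustrative sketch)."""

    def __init__(self, host='localhost', pass_=None, db=0):
        self.database = redis.StrictRedis(host=host, password=pass_, db=db)

    def write_data(self, key, value):
        self.database.set(key, value)
        log.debug("write: [{}, {}]".format(key, value))
        return True

    def read_data(self, key):
        # returns None when the key is absent, as Example #28 expects
        value = self.database.get(key)
        if value is not None:
            value = value.decode('UTF-8')
        log.debug("read: {}, found: {}".format(key, value))
        return value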
Example #22
# @%@~LICENSE~@%@

from datetime import datetime
from steem import Steem
from steem.account import Account
from steem.blockchain import Blockchain
from steem.commit import Commit
from steem.post import Post
from steem.utils import construct_identifier
from steembase.exceptions import PostDoesNotExist, RPCError

import ag.logging as log

from ag.boiler.__version__ import __version__
from ag.boiler.config import account as cred, mask
log.debug("Account credentials loaded", id=cred.id, key=mask(cred.key))


def run():
    log.info("Timely post mode activated")

    log.debug("initializing...")
    steem = Steem(keys=[cred.key])
    account = Account(cred.id, steem)
    chain = Blockchain(steem)
    commit = Commit(steem)
    log.debug("ready", steem=steem, account=account, blockchain=chain, commit=commit)

    # Because subsequent edits to a post show up as separate post entries in the blockchain,
    # we'll keep a list of candidates keyed by the post identifier which the edits share.
    candidates = {}
Example #23
# @%@~LICENSE~@%@

from datetime import datetime, timedelta
from decimal import Decimal
from time import sleep

from steem import Steem
from steem.blockchain import Blockchain
from steem.post import Post
from steembase.exceptions import PostDoesNotExist, RPCError

import ag.logging as log

from ag.boiler.config import account, mask

log.debug("Account credentials loaded", id=account.id, key=mask(account.key))


class Curation():
    def __init__(self, chain, min_payout):
        self.chain = chain
        self.min_payout = min_payout
        self.posts = {}  # queue of curation candidates, keyed by post id
        self.first_vote = None  # time of our first vote (today)
        self.votes_today = 0  # number of times we have voted in the 24 hours since first_vote
        self.max_payout = Decimal("0")  # maximum pending payout encountered today

    def watch(self, tags):
        log.info("Watching for new posts...")
        while True:
Example #24
    def summarize(self, title, tags):
        log.info("Summarizing market...",
                 symbol=self.symbol,
                 against=self.against)

        if self.testing:
            log.info("TESTING MODE ENABLED")

        ticker = self.api.ticker()
        try:
            ticker = ticker[self.against + '_' + self.symbol]
        except KeyError as e:
            log.error("Currency pair not found in ticker data",
                      symbol=self.symbol,
                      against=self.against,
                      exception=e)
            raise ValueError("Currency pair not found in ticker data")

        tz = get_localzone()
        now = datetime.now(tz)
        nowstr = now.strftime('%Y-%m-%d %H:%M:%S %Z')
        log.debug("got ticker data", now=nowstr, ticker=ticker)

        last = Decimal(ticker['last'])
        if self.against == 'USDT' or self.against == 'USD':
            symbol = '$'
            quant = Decimal('0.00')
        else:
            symbol = ''
            quant = Decimal('0.00000000')
        laststr = symbol + str(last.quantize(quant))
        log.debug("last trade", value=laststr)

        ath = None
        newath = False

        nowfile = path.join(
            dir, 'market.' + self.symbol + '-' + self.against + '.time')
        lastfile = path.join(
            dir, 'market.' + self.symbol + '-' + self.against + '.last')
        img_url = None

        if path.exists(nowfile) and path.exists(lastfile):
            prev = True

            with open(nowfile, 'r') as infile:
                prev_now = datetime.fromtimestamp(int(
                    infile.readline().strip()),
                                                  tz=tz)

            with open(lastfile, 'r') as infile:
                prev_last = Decimal(infile.readline().strip())

            prev_permlink = self.make_permlink(prev_now)
            prev_nowstr = prev_now.strftime('%Y-%m-%d %H:%M:%S %Z')

            change_price = last - prev_last
            if change_price < Decimal('0'):
                change_pricestr = symbol + str(
                    change_price.copy_negate().quantize(quant))
            else:
                change_pricestr = symbol + str(change_price.quantize(quant))

            change_pct = (Decimal('100') * change_price / prev_last).quantize(
                Decimal('0.00'))
            if change_pct < Decimal('0'):
                change_pctstr = str(change_pct.copy_negate()) + '%'
            else:
                change_pctstr = str(change_pct) + '%'

            highest = last
            lowest = last

            fig = plt.figure(figsize=(10, 7), facecolor='k')
            ax = fig.add_subplot(1, 1, 1)
            rect = ax.patch
            rect.set_facecolor('k')
            img_title = self.symbol + '-' + self.against + ' at ' + nowstr
            plt.title(img_title)
            ax.xaxis_date()
            plt.xticks(rotation=25)
            ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d %H:%M'))

            # first graph 30-minute candlesticks
            log.info("Plotting 30-minute candlesticks...")

            data = self.api.chartData(pair=self.against + '_' + self.symbol,
                                      start=int(prev_now.strftime("%s")) + 1,
                                      period=1800)

            if len(data) == 0:
                raise ValueError("No data returned")
            elif len(data) == 1:
                try:
                    error = data['error']
                    log.error("Received error from API", error=error)
                    raise ValueError(
                        "Received error from API: {}".format(error))

                except (KeyError, TypeError):
                    if int(data[0]['date']) == 0:
                        raise ValueError(
                            "Too soon! You must wait at least 30 minutes between summaries for candlesticks."
                        )

            for row in data:
                high = Decimal(row['high'])
                if high > highest:
                    highest = high

                low = Decimal(row['low'])
                if low < lowest:
                    lowest = low

                time = datetime.fromtimestamp(int(row['date']))
                popen = Decimal(row['open'])
                close = Decimal(row['close'])

                if close >= popen:
                    color = 'g'
                else:
                    color = 'r'

                vline = Line2D(xdata=(time, time),
                               ydata=(low, high),
                               linewidth=1.5,
                               color=color,
                               antialiased=False)
                oline = Line2D(xdata=(time, time),
                               ydata=(popen, popen),
                               linewidth=1,
                               color=color,
                               antialiased=False,
                               marker=TICKLEFT,
                               markersize=7)
                cline = Line2D(xdata=(time, time),
                               ydata=(close, close),
                               linewidth=1,
                               color=color,
                               antialiased=False,
                               marker=TICKRIGHT,
                               markersize=7)

                ax.add_line(vline)
                ax.add_line(oline)
                ax.add_line(cline)

            # then graph 5-minute lines
            log.info("Plotting 5-minute lines...")

            data = self.api.chartData(pair=self.against + '_' + self.symbol,
                                      start=int(prev_now.strftime("%s")) + 1,
                                      period=300)

            if len(data) == 0:
                raise ValueError("No data returned")
            elif len(data) == 1:
                try:
                    error = data['error']
                    log.error("Received error from API", error=error)
                    raise ValueError(
                        "Received error from API: {}".format(error))

                except (KeyError, TypeError):
                    if int(data[0]['date']) == 0:
                        raise ValueError(
                            "Too soon! You must wait at least 5 minutes between summaries."
                        )

            begin = None

            for row in data:
                high = Decimal(row['high'])
                if high > highest:
                    highest = high

                low = Decimal(row['low'])
                if low < lowest:
                    lowest = low

                time = int(row['date'])
                popen = Decimal(row['open'])
                close = Decimal(row['close'])

                if begin is None:
                    begin = popen

                line = Line2D(xdata=(datetime.fromtimestamp(time),
                                     datetime.fromtimestamp(time + 300)),
                              ydata=(begin, close),
                              linewidth=0.7,
                              color='#FFFF00',
                              antialiased=True)

                ax.add_line(line)
                begin = close

            higheststr = symbol + str(highest.quantize(quant))
            loweststr = symbol + str(lowest.quantize(quant))

            athfile = path.join(
                dir, 'market.' + self.symbol + '-' + self.against + '.ath')
            if path.exists(athfile):
                with open(athfile, 'r') as infile:
                    ath = Decimal(infile.readline().strip())

                if highest > ath:
                    ath = highest
                    newath = True

                    if not self.testing:
                        with open(athfile, 'w') as out:
                            out.write(str(ath))

            ax.xaxis.grid(True, color='#555555', linestyle='dotted')
            ax.yaxis.grid(True, color='#555555', linestyle='solid')
            plt.tight_layout()
            ax.autoscale_view()

            # save image to file or memory buffer
            if self.testing:
                imgfile = '/tmp/' + self.symbol + '-' + self.against + '.png'
                fig.savefig(imgfile)
                log.info("Market graph PNG saved", file=imgfile)
            else:
                img = io.BytesIO()
                fig.savefig(img, format='png')
                img.seek(0)

            plt.close(fig)

            # now upload result to imgur
            if not self.testing:
                log.info("Uploading plot to imgur...")

                img_b64 = base64.standard_b64encode(img.read())
                client = 'bbe2ecf93d88915'
                headers = {'Authorization': 'Client-ID ' + client}
                imgur_data = {'image': img_b64, 'title': img_title}
                req = Request(url='https://api.imgur.com/3/upload.json',
                              data=urlencode(imgur_data).encode('ASCII'),
                              headers=headers)
                resp = urlopen(req).read()
                resp = json.loads(resp)
                log.debug("Got response from imgur", resp=resp)

                if resp['success']:
                    img_url = resp['data']['link']
                    log.info("Image uploaded successfully", url=img_url)

                else:
                    log.error("Non-successful response from imgur", resp=resp)
                    raise ValueError("Non-successful response from imgur")

        else:
            prev = False

        body = "Market Summary for " + self.symbol
        body += "\n=="
        body += "\n* All prices in *" + self.against + "*"
        body += "\n---"
        body += "\n"
        if prev:
            if change_pct > Decimal('0'):
                body += "\nUp " + change_pctstr
                title += ": Up " + change_pctstr
            elif change_pct < Decimal('0'):
                body += "\nDown " + change_pctstr
                title += ": Down " + change_pctstr
            else:
                body += "\nFlat"
                title += ": Flat"
            if newath:
                body += " (New All Time High Achieved)"
                title += " -- New All Time High!"
            body += "\n-"
            body += "\n" + self.symbol + " **"
            if change_price > Decimal('0'):
                body += "gained " + change_pricestr
            elif change_price < Decimal('0'):
                body += "lost " + change_pricestr
            else:
                body += "had no change"
            body += "** since the [last market summary]"
            body += "(https://steemit.com/@" + account.id + "/" + prev_permlink + ")"
            if change_pct > Decimal('0'):
                body += ", a change of **" + change_pctstr + "**"
            elif change_pct < Decimal('0'):
                body += ", a change of **-" + change_pctstr + "**"
            body += "."
        else:
            body += "\n*This is the first market summary, so no previous comparison data is available.*"
        body += "\n"
        body += "\n* Last trade: *" + laststr + "*"
        if prev:
            body += "\n* Highest trade: *" + higheststr + "*"
            if newath:
                body += " (new all time high)"
            body += "\n* Lowest trade: *" + loweststr + "*"
            if img_url is not None:
                body += "\n"
                body += "\n[![market activity plot](" + img_url + ")](" + img_url + ")"
        body += "\n"
        body += "\n---"
        body += "\n"
        body += "\n* Snapshot taken at *" + nowstr + "*"
        if prev:
            body += "\n* Previous snapshot: *[" + prev_nowstr + "]"
            body += "(https://steemit.com/@" + account.id + "/" + prev_permlink + ")*"
        body += "\n* Quote data from [Poloniex](http://poloniex.com)"
        body += "\n"
        body += "\n<center>Happy trading... stay tuned for the next summary!</center>"
        body += "\n"
        body += "\n---"
        body += "\n<center>*This market summary produced automatically by:"
        body += "\n[![Alpha Griffin logo](http://alphagriffin.com/usr/include/ag/favicon/favicon-128.png)"
        body += "\nAlpha Griffin Boiler bot](https://github.com/AlphaGriffin/boiler)"
        body += "\nv" + __version__ + "*</center>"

        if self.testing:
            print(body)

        permlink = self.make_permlink(now)
        tries = 0
        post = None

        while tries < self.max_tries:
            try:
                log.info("Posting summary...",
                         permlink=permlink,
                         title=title,
                         last=laststr,
                         tags=tags)

                if self.testing:
                    log.warn("Not actually going to post (testing mode)")
                    break

                post = self.commit.post(permlink=permlink,
                                        title=title,
                                        author=account.id,
                                        body=body,
                                        tags=tags,
                                        self_vote=True)

                break

            except RPCError as e:
                log.warn(
                    "Got RPC error while posting, trying again in 1 minute...",
                    exception=e)
                tries += 1
                sleep(60)

        if post is not None:
            log.info("Summary posted successfully", post=post)

            with open(nowfile, 'w') as out:
                out.write(now.strftime("%s"))

            with open(lastfile, 'w') as out:
                out.write(str(last))

            return True

        else:
            if not self.testing:
                log.error("Failed to post summary")

            return False
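The percent-change arithmetic used above, in isolation (the sample prices are made up):

from decimal import Decimal

prev_last = Decimal('0.00012345')
last = Decimal('0.00013000')

change_price = last - prev_last
# percent change against the previous snapshot, rounded to two places
change_pct = (Decimal('100') * change_price / prev_last).quantize(Decimal('0.00'))
assert str(change_pct) == '5.31'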
Example #25
def run():
    log.info("Timely post mode activated")

    log.debug("initializing...")
    steem = Steem(keys=[cred.key])
    account = Account(cred.id, steem)
    chain = Blockchain(steem)
    commit = Commit(steem)
    log.debug("ready", steem=steem, account=account, blockchain=chain, commit=commit)

    # Because subsequent edits to a post show up as separate post entries in the blockchain,
    # we'll keep a list of candidates keyed by the post identifier which the edits share.
    candidates = {}

    log.info("Checking post history...")
    history = map(Post, account.history(filter_by=['comment']))

    # FIXME: use steem.get_posts() instead?

    for post in history:
        if post.is_main_post():
            log.debug("found a top-level post", post=post, tags=post.tags)

            if post.tags[0] == cred.id and 'boiled' not in post.tags:
                candidates[post.identifier] = post

    if len(candidates) > 0:
        log.info("Found one or more historical posts to process", posts=candidates)

        deleting = []
        for key, post in candidates.items():
            result = process(commit, post)
            if result or result is None:
                deleting.append(key)
        for key in deleting:
            del candidates[key]

    log.info("Watching for new posts...")
    while True:
        stream = map(Post, chain.stream(filter_by=['comment']))

        try:
            for post in stream:
                if post.is_main_post() and post.author == cred.id:
                    log.debug("found a top-level post", post=post, tags=post.tags)

                    if len(post.tags) == 2 and post.tags[0] == cred.id and post.tags[1] == cred.id:
                        candidates[post.identifier] = post

                deleting = []
                for key, post in candidates.items():
                    result = process(commit, post)
                    if result or result is None:
                        deleting.append(key)
                for key in deleting:
                    del candidates[key]

        except PostDoesNotExist as e:
            log.debug("Post has vanished", exception=e)

        except RPCError as e:
            log.error("RPC problem while streaming posts", exception=e)
Example #26
def process(commit, post):
    log.debug("checking post", post=post.__dict__)

    lines = post.body.splitlines()
    if len(lines) < 2:
        log.warn("this post appears to be empty or lacking timely data", post=post)
        return None

    timely = lines[-1].split(' ')
    if len(timely) < 3:
        log.warn("this post lacks timely data: <date> <time> <tag> ...", post=post)
        return None

    when = datetime.strptime('{} {}'.format(timely[0], timely[1]), '%Y-%m-%d %H:%M')

    if datetime.now() >= when:
        log.info("This post is boiling!", post=post)

        tags = timely[2:]
        meta = {'app' : 'boiler/{}'.format(__version__)}
        link = '-' + post.permlink

        if lines[-2] == '```':
            body = "\n".join(lines[:-2])
        else:
            body = "\n".join(lines[:-1])

        body += "\n---"
        body += "\n<center>*This post made timely by:"
        body += "\n[![Alpha Griffin logo](http://alphagriffin.com/usr/include/ag/favicon/favicon-128.png)"
        body += "\nAlpha Griffin Boiler bot](https://github.com/AlphaGriffin/boiler)"
        body += "\nv" + __version__ + "*</center>"

        newpost = commit.post(permlink=link,
                              title=post.title,
                              author=post.author,
                              body=body,
                              tags=tags,
                              json_metadata=meta,
                              self_vote=True)
        log.debug("new post committed!", result=newpost)

        body = "This post has boiled! Find it now here:"
        body += "\n* https://steemit.com/@"+post.author+"/"+link
        body += "\n---"
        body += "\n<center>*Timely posts made possible by:"
        body += "\n[![Alpha Griffin logo](http://alphagriffin.com/usr/include/ag/favicon/favicon-128.png)"
        body += "\nAlpha Griffin Boiler bot](https://github.com/AlphaGriffin/boiler)"
        body += "\nv" + __version__ + "*</center>"

        meta['tags'] = [post.category, 'boiled']

        edited = commit.post(permlink=post.permlink,
                             title=post.title,
                             author=post.author,
                             body=body,
                             tags=meta['tags'],
                             json_metadata=meta,
                             reply_identifier=construct_identifier(
                                 post["parent_author"], post["parent_permlink"]))
        log.debug("original post edited!", result=edited)

        return True

    else:
        return False
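For reference, process() expects the body of a scheduled post to end with a line of the form <date> <time> <tag> [<tag> ...]; a minimal sketch of the parsing it performs (the sample values are made up):

from datetime import datetime

# hypothetical final line of a scheduled post body
timely_line = "2017-08-15 18:30 steemit boiler news"
fields = timely_line.split(' ')
when = datetime.strptime('{} {}'.format(fields[0], fields[1]), '%Y-%m-%d %H:%M')
tags = fields[2:]
assert tags == ['steemit', 'boiler', 'news']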
Example #27
# Copyright (C) 2017 Alpha Griffin
# @%@~LICENSE~@%@

import ag.logging as log

from os import mkdir, path

from appdirs import AppDirs
dirs = AppDirs("boiler", "Alpha Griffin")

dir = dirs.user_config_dir
log.debug("Starting up", configdir=dir)

if not path.exists(dir):
    log.info("Running first-time setup for configuration...")

    log.debug("Creating user config directory")
    mkdir(dir)

if not path.isdir(dir):
    log.fatal("Expected a directory for configdir", configdir=dir)
    raise Exception("Not a directory: " + dir)


def mask(secret):
    if secret is None:
        return ''
    elif len(secret) < 10:
        return '*' * len(secret)
    else:
        return secret[:3] + ('*' * (len(secret) - 6)) + secret[-3:]
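A quick usage sketch of mask() (the sample secret is made up):

secret = '5Jabcdefghijklmnopqrstuv'
masked = mask(secret)
assert mask(None) == ''
assert mask('short') == '*****'          # short secrets are fully starred
assert masked.startswith('5Ja') and masked.endswith('tuv')
assert '*' * (len(secret) - 6) in masked # middle is starred out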
Example #28
    def process_network(self, sample_set, network):
        """Run the training loop over the sample set and checkpoint progress."""

        # DEFINES!!
        training_data = sample_set.content

        # dictionary = sample_set.dictionary
        # reverse_dictionary = sample_set.reverse_dictionary
        n_input = self.n_input
        vocab_size = sample_set.dict_len

        # start here
        start_time = time.time()
        session = self.sess
        #if self.sess:
        #    session = self.sess
        #else:
        #    session = tf.Session()
        #session.run(network.init_op)
        writer = tf.summary.FileWriter(self.logs_path)
        _step = 0
        offset = random.randint(0, n_input + 1)
        end_offset = n_input + 1
        acc_total = 0
        loss_total = 0
        display_step = 10
        pred_msg = ' "{}" *returns* "{}" *vs* "{}"\n'
        msg = "step: {0:}, offset: {1:}, acc_total: {2:.2f}, loss_total: {3:.2f}"
        log.debug("Starting the Train Session:")
        # start by adding the whole graph to the Tboard
        writer.add_graph(session.graph)

        for i in range(self.train_iters):
            # Generate a minibatch. Add some randomness on selection process.
            if offset > (len(training_data) - end_offset):
                offset = random.randint(0, self.n_input + 1)
            symbols_in_keys = []
            # distinct loop variable so the outer iteration counter `i`
            # (used for the display check below) is not clobbered
            for k in range(offset, offset + self.n_input):
                symbols_in_keys.append(
                    self.database.read_data(str(training_data[k])))
            symbols_in_keys = np.reshape(np.array(symbols_in_keys),
                                         [-1, n_input, 1])

            symbols_out_onehot = np.zeros([vocab_size], dtype=float)
            # symbols_out_onehot[dictionary[str(training_data[offset + n_input])]] = 1.0
            one_hot = self.database.read_data(
                str(training_data[offset + n_input]))
            if one_hot is None:
                one_hot = 0
            symbols_out_onehot[int(one_hot)] = 1.0
            symbols_out_onehot = np.reshape(symbols_out_onehot, [1, -1])

            feed_dict = {
                network.input_word: symbols_in_keys,
                network.input_label: symbols_out_onehot
            }

            try:
                _, acc, loss, onehot_pred, _step, summary = session.run(
                    [
                        network.optimizer, network.accuracy, network.cost,
                        network.final_layer, network.global_step,
                        network.merged
                    ],
                    feed_dict=feed_dict)

                log.debug("###WORKING {}!!####".format(_step))
                # pool data results
                loss_total += loss
                acc_total += acc
                if i % 25 == 0:
                    # acc pool
                    print("###WORKING2!!####")
                    acc_total = (acc_total * 100) / display_step
                    loss_total = loss_total / display_step
                    # gather datas
                    try:
                        symbols_in = [
                            training_data[i]
                            for i in range(offset, offset + n_input)
                        ]
                        symbols_out = training_data[offset + n_input]
                        symbols_out_pred = self.rev_dict.read_data(
                            int(
                                tf.argmax(onehot_pred,
                                          1).eval(session=session)))
                        # do save actions
                        log.info("Saving the Train Session:\n{}\n{}".format(
                            msg.format(_step, offset, acc_total, loss_total),
                            pred_msg.format(symbols_in, symbols_out,
                                            symbols_out_pred)))
                    except Exception as e:
                        log.warn("Bad things are happening here: {}\n\t{}".
                                 format(elapsed(time.time() - start_time), e))
                    # Save Functions
                    self.saver.save(session,
                                    self.logs_path + self.filename,
                                    global_step=network.global_step)
                    writer.add_summary(summary, global_step=_step)
                    # projector.visualize_embeddings(writer, network.config)
                    # reset the pooling counters
                    acc_total = 0
                    loss_total = 0
                # end of loop increments
                offset += (n_input + 1)
            except Exception as e:
                log.warn("Blowing it dude... {}\nError: {}".format(_step, e))
        # Save Functions
        self.saver.save(session,
                        self.logs_path + self.filename,
                        global_step=network.global_step)
        writer.add_summary(summary, global_step=_step)
        # projector.visualize_embeddings(writer, network.config)
        log.info("Optimization Finished!")
        log.debug("Elapsed time: {}".format(elapsed(time.time() - start_time)))
        session.close()
        return (loss_total, acc_total)