def cb_get_unspent(self, response):
    # TODO - refactor this!
    if not isinstance(response, list):
        (address, unspent) = response
        if unspent == -1:
            print "Warning: BTC processor unable to get unspent outputs for address " + str(address) + ", giving up for now"
            response_msg = queue_task(0, 'btc_update_unspent', {'address': address, 'unspent_outputs': -1})
        else:
            response_msg = queue_task(0, 'btc_update_unspent', {'address': address, 'unspent_outputs': unspent})
        self.response_queue.put(response_msg)
    else:
        for response_item in response:
            (address, unspent) = response_item
            if unspent == -1:
                print "Warning: BTC processor unable to get unspent outputs for address " + str(address) + ", giving up for now"
                response_msg = queue_task(0, 'btc_update_unspent', {'address': address, 'unspent_outputs': -1})
            else:
                response_msg = queue_task(0, 'btc_update_unspent', {'address': address, 'unspent_outputs': unspent})
            self.response_queue.put(response_msg)

def cb_get_stratum_peers(self, response):
    if response:  # If get peers returned data then process it
        peers = []
        for peer in response:
            #print peer
            port = None   # reset per peer so values do not leak between entries
            sport = None
            ssl = False
            for item in peer[2]:
                if item[0] == 'p':
                    port = item[1:]
                if item[0] == 's':
                    ssl = True
                    sport = item[1:]
            if sport:  # prefer the ssl port if specified
                port = sport
            if ssl:  # only use servers that support ssl
                peers.append((peer[1] + ':' + port + ':s',))  # why the tuple?
                #print peers[-1]
        response_msg = queue_task(0, 'btc_update_stratum_peers', {'peers': peers})  # Update our list of peers
        # TODO - this could be checked and merged with a known-good electrum server list
        self.stratum_servers = peers  # update the local list of peers
        self.response_queue.put(response_msg)  # also update the backend thread with the current peers so that the config database can be updated
        # TODO: implement dead host checking, especially needed for access through Tor
    else:
        print "Warning: BTC processor unable to refresh list of stratum peers, giving up for now"

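# For reference, the peer entries above are assumed to follow the Electrum
# 'server.peers.subscribe' shape [ip, hostname, [feature, ...]], e.g. (illustrative only):
#   ['192.0.2.10', 'electrum.example.org', ['v1.0', 'p10000', 't50001', 's50002']]
# which the loop above would turn into the peer string 'electrum.example.org:50002:s'.
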
def cb_get_balance(self, response):
    # TODO - refactor this!
    if not isinstance(response, list):
        (address, balance) = response
        if balance == -1:
            print "Warning: BTC processor unable to get balance for address " + str(address) + ", giving up for now"
            response_msg = queue_task(0, 'btc_update_balance', {'address': address, 'balance_confirmed': -1, 'balance_unconfirmed': -1})
        else:
            response_msg = queue_task(0, 'btc_update_balance', {'address': address, 'balance_confirmed': balance['confirmed'], 'balance_unconfirmed': balance['unconfirmed']})
        self.response_queue.put(response_msg)
    else:
        for response_item in response:
            (address, balance) = response_item
            if balance == -1:
                print "Warning: BTC processor unable to get balance for address " + str(address) + ", giving up for now"
                response_msg = queue_task(0, 'btc_update_balance', {'address': address, 'balance_confirmed': -1, 'balance_unconfirmed': -1})
            else:
                response_msg = queue_task(0, 'btc_update_balance', {'address': address, 'balance_confirmed': balance['confirmed'], 'balance_unconfirmed': balance['unconfirmed']})
            self.response_queue.put(response_msg)

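# A possible shape for the "TODO - refactor this!" in cb_get_unspent()/cb_get_balance() above:
# normalise the single-tuple case to a list and emit one update per address. The helper name,
# the shared payload-building callable and the generic warning text are hypothetical sketches,
# not part of the existing code.
def _queue_address_updates(self, response, command, build_payload):
    if not isinstance(response, list):
        response = [response]
    for (address, value) in response:
        if value == -1:
            print "Warning: BTC processor unable to get data for address " + str(address) + ", giving up for now"
        self.response_queue.put(queue_task(0, command, build_payload(address, value)))

# Example wiring (hypothetical):
#   self._queue_address_updates(response, 'btc_update_unspent',
#       lambda address, unspent: {'address': address, 'unspent_outputs': unspent})
#   self._queue_address_updates(response, 'btc_update_balance',
#       lambda address, balance: {'address': address,
#                                 'balance_confirmed': -1 if balance == -1 else balance['confirmed'],
#                                 'balance_unconfirmed': -1 if balance == -1 else balance['unconfirmed']})
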
def process_queued_task_request(self, queue_msg, queue):
    if queue_msg.command == 'make_mqtt_pgp_auth':
        logger.info('Received authentication ticket request from messaging transport service for broker %s', queue_msg.data)
        password = self.make_pgp_auth(queue_msg.data)
        queue_msg_pgp_reply = queue_task(id=queue_msg.id,
                                         command=queue_msg.command,
                                         msg_type=queue_task.REPLY,
                                         rc=queue_task.OK,
                                         data=password)
        self.mts_in_queue.put(queue_msg_pgp_reply)

def mqtt_publish_message(self, msg=None, mqtt_topic=None, qos=1, queue_task_id=None, publish=False):
    rc, mid = self.transport.publish(topic=mqtt_topic, payload=msg, qos=qos, retain=publish)
    if not rc == MQTT_ERR_SUCCESS:
        # Not delivered
        queue_reply_msg = queue_task(id=queue_task_id, rc=queue_task.NOT_OK, msg_type=queue_task.REPLY)
        self.queue_from_message_transport_service.put(queue_reply_msg)
        logger.warning('Could not deliver MQTT message to %s requested by task queue ID %s', mqtt_topic, queue_task_id)
    else:
        self.mqtt_mid_queue_task_map[mid] = queue_task_id  # Store the returned MID and associate it with the queue message id

def flush_unfinished_mqtt_operations(self):
    logger.info('Checking for incomplete MQTT tasks')
    for mid, task_id in self.mqtt_mid_queue_task_map.items():
        queue_reply_msg = queue_task(task_id, command=None, data=None, rc=queue_task.NOT_OK, msg_type=queue_task.REPLY)
        self.queue_from_message_transport_service.put(queue_reply_msg)
        logger.warning('Failing and purging incomplete MQTT task %s', task_id)
    self.mqtt_mid_queue_task_map.clear()
    self.mqtt_sub_topic_queue_task_map.clear()
    self.inbound_msg_hashes.clear()

def on_publish(self, client, userdata, mid):
    logger.info('Message published successfully on %s using %s', self.current_broker, self.current_network)
    try:
        queue_msg_id = self.mqtt_mid_queue_task_map[mid]
        del self.mqtt_mid_queue_task_map[mid]
    except KeyError:
        logger.error('Could not associate outbound MQTT message ID with a queue task ID')
        return
    # Delivered
    queue_reply_msg = queue_task(queue_msg_id, command=None, data=None, rc=queue_task.OK, msg_type=queue_task.REPLY)
    self.queue_from_message_transport_service.put(queue_reply_msg)

def on_message(self, client, userdata, msg):
    logger.info('Message received on %s using %s', msg.topic, self.current_broker)
    # Send incoming message to messaging system
    hash = hashlib.sha1(msg.payload)
    if hash.hexdigest() in self.inbound_msg_hashes:
        logger.warning('Duplicate message on %s dropped', msg.topic)
        return
    else:
        self.inbound_msg_hashes.add(hash.hexdigest())
    queue_update_msg = queue_task(id='mts_mqtt:0',
                                  command='inbound_message',
                                  data={'payload': msg.payload, 'location': msg.topic},
                                  msg_type=queue_task.UPDATE)
    self.queue_from_message_transport_service.put(queue_update_msg)

def mqtt_subscribe_message(self, mqtt_topic=None, qos=0, queue_task_id=None, persistent=False):
    rc, mid = self.transport.subscribe(topic=mqtt_topic, qos=qos)
    if not rc == MQTT_ERR_SUCCESS:
        # Could not subscribe to get message
        queue_reply_msg = queue_task(id=queue_task_id, rc=queue_task.NOT_OK, msg_type=queue_task.REPLY)
        self.queue_from_message_transport_service.put(queue_reply_msg)
        logger.warning('Could not subscribe to MQTT topic %s requested by task queue ID %s', mqtt_topic, queue_task_id)
    else:
        if not persistent:
            # Only store the mid if the request is for a single retained message - if a mid is present during on_subscribe() then we will unsubscribe
            self.mqtt_mid_queue_task_map[mid] = queue_task_id  # Store the returned MID and associate it with the queue message id
            self.mqtt_sub_topic_queue_task_map[queue_task_id] = mqtt_topic  # Store the desired topic associated with the queue_task_id

def on_unsubscribe(self, client, userdata, mid):
    logger.info('Topic unsubscribed on broker %s using %s', self.current_broker, self.current_network)
    try:
        queue_msg_id = self.mqtt_mid_queue_task_map[mid]
        subscribed_topic = self.mqtt_sub_topic_queue_task_map[queue_msg_id]
        logger.info('Now unsubscribed from %s (originally requested for %s)', subscribed_topic, queue_msg_id)
        # TODO - if no message was received for this subscription by now then it is now safe to assume that no message exists - deal with that here or in messaging?
        del self.mqtt_mid_queue_task_map[mid]
        del self.mqtt_sub_topic_queue_task_map[queue_msg_id]
        # get_message has completed successfully in any case
        queue_reply_msg = queue_task(id=queue_msg_id, command=None, data=None, rc=queue_task.OK, msg_type=queue_task.REPLY)
        self.queue_from_message_transport_service.put(queue_reply_msg)
    except KeyError:
        logger.info('Unsubscribed from persistent connection (probably)')

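# Bookkeeping summary for the two maps used by the handlers above:
#   publish:   mqtt_publish_message() stores the returned mid in mqtt_mid_queue_task_map;
#              on_publish() pops it and sends an OK REPLY for the originating queue task
#              (a failed publish() call replies NOT_OK immediately).
#   subscribe: mqtt_subscribe_message() stores the mid (non-persistent requests only) together
#              with the topic in mqtt_sub_topic_queue_task_map; on_unsubscribe() pops both and
#              replies OK. Persistent subscriptions are deliberately not tracked, hence the
#              KeyError branch above.
#   flush_unfinished_mqtt_operations() fails and clears whatever is still outstanding.
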
def run(self): print "BTC Exchange rate thread started using SOCKS proxy " + self.socks_proxy + ":" + self.socks_port # Make the request look like it came from a browser TODO - define the browser headers elsewhere so they can be easily updated headers = [('User-Agent','Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0'), ('Accept','text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ('Accept-Language','en-us,en;q=0.5')] opener = urllib2.build_opener(SocksiPyHandler(socks.SOCKS5, self.socks_proxy, int(self.socks_port))) opener.addheaders = headers while self.running: try: response = opener.open('http://bitpay.com/api/rates',None,30).read() print "Response" + str(response) except: print "Warning: Failed to retrieve current exchange rates via https/socks" sleep (60) # try again in a minute TODO try other exchange api sources else: data = json.loads(response) task = queue_task(1, 'update_exchange_rates', data) print "Exchange rates updated" self.queue.put(task) sleep(900) # default 15 minutes TODO: Randomize this slightly (between 10 - 30 minutes) print "Exchange rate collector shutting down"
def request_auth_ticket(self, broker):
    # Request an authentication ticket from the main messaging system
    self.broker_password = None
    logger.info('Requesting authentication ticket from messaging service for broker %s', self.current_broker)
    auth_req_task = queue_task(id='mts_mqtt:1', command='make_mqtt_pgp_auth', data=broker, msg_type=queue_task.REQUEST)
    self.queue_from_message_transport_service.put(auth_req_task)

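# Note: the REPLY to this 'make_mqtt_pgp_auth' REQUEST is produced by process_queued_task_request()
# above, which generates the broker password via make_pgp_auth() and puts it on the transport
# service's inbound queue (mts_in_queue).
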
def run(self): print "Info: BTC Exchange rate thread started using SOCKS proxy " + self.socks_proxy + ":" + self.socks_port # Make the request look like it came from a browser TODO - define the browser headers elsewhere so they can be easily updated headers = [ ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0' ), ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' ), ('Accept-Language', 'en-us,en;q=0.5') ] os.environ[ 'no_proxy'] = '127.0.0.1' # This needs to be set to avoid a hard crash on OSX when using urllib or requests opener = urllib2.build_opener( SocksiPyHandler(socks.SOCKS5, self.socks_proxy, int(self.socks_port))) opener.addheaders = headers BTC_API_BITPAY = 'http://bitpay.com/api/rates' BTC_API_BITCOINCHARTS = 'http://api.bitcoincharts.com/v1/weighted_prices.json' BTC_API_BITCOINAVERAGE = 'https://api.bitcoinaverage.com/ticker/global/all' sources = [BTC_API_BITPAY, BTC_API_BITCOINCHARTS] source_index = 0 while self.running: source = sources[source_index] # BTC_API_BITCOINCHARTS try: response = opener.open(source, None, 30).read() #print "Response" + str(response) data_list = json.loads(response) # Process based on source if source == BTC_API_BITPAY: data = data_list # we can use these results as they come elif source == BTC_API_BITCOINCHARTS: data = [] for code in data_list: if not ( code == 'CHF' or code == 'timestamp' ): # For some reasson CHF is broken in the bitcoincharts api data.append({ 'code': code, 'rate': data_list[code]['24h'] }) elif source == BTC_API_BITCOINAVERAGE: # TODO - Process bitcoinaverage api output print "BTC_API_BITCOINAVERAGE NOT IMPLEMENTED" data = [] for code in data_list: data.append({ 'code': code, 'rate': data_list[code]['24h_avg'] }) task = queue_task(1, 'update_exchange_rates', data) print "Info: Exchange rates updated OK using " + source self.queue.put(task) sleep(600 + random.randint(0, 1200) ) # refresh every 10 - 30 minutes - randomize period except: print "Warning: Failed to retrieve current exchange rates from " + source if source_index < len(sources) - 1: source_index += 1 else: source_index = 0 sleep(10) # try again in 10 seconds print "Exchange rate collector shutting down"