Example 1
# Thin wrapper around a pymemcache client that swallows errors and
# returns neutral values instead of raising.
from pymemcache.client.base import Client


class MemcacheCli(object):
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.client = Client((self.host, self.port))

    def set(self, key, value, expire):
        try:
            return self.client.set(key, value, expire)
        except Exception:
            return False

    def get(self, key):
        try:
            return self.client.get(key, default=None)
        except Exception:
            return None

    def mset(self, values, expire):
        try:
            # set_many returns a list of keys that failed to be stored
            return self.client.set_many(values, expire)
        except Exception:
            return False

    def mget(self, keys):
        try:
            return self.client.get_many(keys)
        except Exception:
            return None
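A minimal usage sketch for the wrapper above (the host, port, and keys are illustrative; note that pymemcache returns stored values as bytes by default):

# Usage sketch -- assumes memcached is listening on localhost:11211.
cli = MemcacheCli('localhost', 11211)
cli.set('greeting', 'hello', expire=60)   # cache for 60 seconds
print(cli.get('greeting'))                # b'hello' (bytes, not str)
cli.mset({'a': '1', 'b': '2'}, expire=60)
print(cli.mget(['a', 'b']))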
Example 2
    def run(self):

        client = Client(('127.0.0.1', 11211))
        url = 'https://talosintelligence.com/documents/ip-blacklist'
        talosvalue = 'talos-ip'

        try:
            response = requests.get(url)
            if response.ok:
                # Collect the blacklisted IPs, skipping comments and blank lines.
                responseArr = []
                for line in response.text.splitlines():
                    if line and not line.startswith('#'):
                        responseArr.append(line)
                # Fetch any existing cache entries in one round trip.
                valueCheck = client.get_many(responseArr)
                for k in responseArr:
                    tempArr = [talosvalue]
                    if k in valueCheck:
                        # Merge the tags already cached for this IP.
                        val = valueCheck[k].decode()
                        valueArr = talosToMemcache.stringHelper(val)
                        for item in valueArr:
                            if item not in tempArr:
                                tempArr.append(item)
                    client.set(k, tempArr, 300)

        except Exception as e:
            with open('/var/log/misppullLog.txt', 'a') as file:
                file.write(
                    '{0} - talosFeed-script failed with error: {1}\n'.format(
                        str(time.asctime()), str(e)))
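Note that client.set(k, tempArr, 300) stores a Python list, while the base pymemcache Client only accepts bytes/str values by default, so a snippet like this presumably relies on a serializer being configured. A hedged sketch of one way to do that, assuming a pymemcache version that ships pymemcache.serde.pickle_serde:

# Sketch: let pymemcache (de)serialize Python objects via pickle.
from pymemcache import serde
from pymemcache.client.base import Client

client = Client(('127.0.0.1', 11211), serde=serde.pickle_serde)
client.set('some-ip', ['talos-ip'], expire=300)  # lists now round-trip
print(client.get('some-ip'))                     # ['talos-ip']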
Example 3
# File-chunking cache on top of pymemcache: files are split into
# CHUNK_SIZE pieces and each piece is stored under an indexed key.
import configparser
import os

from pymemcache.client.base import Client


class FileCacher:
    MEMCACHE_CONFIG = configparser.ConfigParser()
    MEMCACHE_CONFIG.read('config.ini')
    FILE_SIZE_LIMIT = 52428800  # 50 MB (50 * 1024 * 1024)
    CHUNK_SIZE = 1000000

    def __init__(self):
        server_address = self.MEMCACHE_CONFIG['DEFAULT']['server_address']
        port = int(self.MEMCACHE_CONFIG['DEFAULT']['port'])
        self.memcache_client = Client((server_address, port))
        self.cache_keys = {}

    def store(self, name, infile):
        """
        Store a file to memcache.

        :param name: Name under which the file's chunk keys are tracked
        :param infile: Path to the file being stored
        :return: True if every chunk was stored successfully
        """
        if self._check_file_size(infile) > self.FILE_SIZE_LIMIT:
            raise ValueError('File size exceeds the 50 MB limit')
        self.cache_keys[name], statuses = [], []
        with open(infile, 'rb') as file:
            # iter(callable, sentinel) keeps reading until read() returns b''.
            for chunk_index, file_chunk in enumerate(
                    iter(lambda: file.read(self.CHUNK_SIZE), b'')):
                key = f'{name}_chunk{chunk_index}'
                status = self.memcache_client.set(key, file_chunk)
                self.cache_keys[name].append(key)
                statuses.append(status)
        return all(statuses)

    def retrieve(self, name, outfile):
        """
        Retrieve a file from memcache given a name and write it to outfile.
        """
        cache_keys = self.cache_keys.get(name)
        if not cache_keys:
            raise KeyError(f'No cached chunks recorded for {name!r}')
        file_chunks = self.memcache_client.get_many(cache_keys)
        # Join in the original chunk order rather than dict order.
        file_contents = b''.join(file_chunks[k] for k in cache_keys)
        with open(outfile, 'wb') as file:
            file.write(file_contents)
        return file_contents

    def _check_file_size(self, file_path):
        return os.path.getsize(file_path)
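FileCacher reads its connection settings from a config.ini in the working directory; a minimal sketch of that file and of the class in use (all paths and values are illustrative):

# config.ini -- illustrative values for the DEFAULT section read above
[DEFAULT]
server_address = 127.0.0.1
port = 11211

# Usage sketch: chunk a file into the cache, then reassemble it.
cacher = FileCacher()
cacher.store('report', 'report.pdf')
cacher.retrieve('report', 'report_copy.pdf')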
Example 4
    def run(self):

        client = Client(('127.0.0.1', 11211))

        dataTypes = {'DOM', 'IP', 'URL'}

        for dt in dataTypes:
            url = 'https://ransomwaretracker.abuse.ch/downloads/RW_{0}BL.txt'.format(
                dt)
            # Tag cached entries with the feed and data type they came from.
            rwtvalue = 'ransomwaretracker-'
            if dt == 'DOM':
                rwtvalue += 'domain'
            elif dt == 'IP':
                rwtvalue += 'ip'
            elif dt == 'URL':
                rwtvalue += 'url'

            try:
                response = requests.get(url)
                if response.ok:
                    # Collect feed entries, skipping comments and blank lines.
                    responseArr = []
                    for line in response.text.splitlines():
                        if line and not line.startswith('#'):
                            responseArr.append(line)
                    valueCheck = client.get_many(responseArr)
                    for k in responseArr:
                        tempArr = [rwtvalue]
                        if k in valueCheck:
                            # Merge the tags already cached for this entry.
                            val = valueCheck[k].decode()
                            valueArr = ransomwareToMemcache.stringHelper(val)
                            for item in valueArr:
                                if item not in tempArr:
                                    tempArr.append(item)
                        client.set(k, tempArr, 300)
            except Exception as e:
                with open('/var/log/misppullLog.txt', 'a') as file:
                    file.write(
                        '{0} - ransomwareTrackerFeed-script failed with error: {1}\n'
                        .format(str(time.asctime()), str(e)))
Example 5
class OrderManager:
    def __init__(self):
        self.exchange = ExchangeInterface(settings.DRY_RUN)
        # Once exchange is created, register exit handler that will always cancel orders
        # on any error.
        atexit.register(self.exit)
        signal.signal(signal.SIGTERM, self.exit)

        logger.info("Using symbol %s." % self.exchange.symbol)

    def init(self):
        if settings.DRY_RUN:
            logger.info(
                "Initializing dry run. Orders printed below represent what would be posted to BitMEX."
            )
        else:
            logger.info(
                "Order Manager initializing, connecting to BitMEX. Live run: executing real trades."
            )

        self.ticks = []
        self.start_time = datetime.now()
        self.instrument = self.exchange.get_instrument()
        self.starting_qty = self.exchange.get_delta()
        self.running_qty = self.starting_qty
        self.reset()

    def reset(self):
        self.history = []
        self.exchange.cancel_all_orders()
        self.memcache = Client(('localhost', 11211))
        self.set_step_size()
        self.sanity_check()
        self.print_status()

        # Create orders and converge.
        self.place_orders()

        # if settings.DRY_RUN:
        #     sys.exit()

    def set_step_size(self):
        """Set up the step size based on the AGGRO setting."""
        # Compare strings with ==, not `is` (identity only works by accident
        # when short strings happen to be interned).
        if settings.AGGRO == '1m':
            self.step_size = timedelta(minutes=1)
        elif settings.AGGRO == '5m':
            self.step_size = timedelta(minutes=5)
        elif settings.AGGRO == '1h':
            self.step_size = timedelta(hours=1)
        elif settings.AGGRO == '1d':
            self.step_size = timedelta(days=1)
        else:
            raise Exception("AGGRO setting '%s' is invalid." % settings.AGGRO)

    def analyze_history(self):
        """Using past close prices, calculate moving averages, and return whether they have crossed.
        0 = didn't cross, 1 = fast has crossed above medium, -1 = fast has crossed below medium."""
        _key = lambda t, a: self.cache_key(
            t, "%s-price-avg-%s-%s" % (settings.SYMBOL, a, settings.AGGRO))
        # determine time frequency (period) and associated times for calculating moving averages
        begin_time = datetime.utcnow()
        end_time = math.snap_time(begin_time, settings.AGGRO)

        # get price data for moving averages
        # tma_prices = self.get_prices(end=end_time, steps=settings.TMA_PERIODS, binsize=settings.AGGRO)
        mma_prices = self.get_prices(end=end_time,
                                     steps=settings.MMA_PERIODS,
                                     binsize=settings.AGGRO)
        fma_prices = self.get_prices(end=end_time,
                                     steps=settings.FMA_PERIODS,
                                     binsize=settings.AGGRO)
        lookups_finished = datetime.utcnow()

        # calculate the new values for the moving averages and commit to cache
        new_avgs = {}
        # fast moving average - use exponential moving average over 20 periods
        fma = fma_prices.ewm(
            span=settings.FMA_PERIODS,
            min_periods=settings.FMA_PERIODS).mean()[-2:].reset_index(
                drop=True)
        new_avgs[_key(end_time, 'fast')] = str(fma[1])
        # medium moving average - use smooth moving avg over 50 periods
        mma = mma_prices.rolling(
            settings.MMA_PERIODS).mean()[-2:].reset_index(drop=True)
        new_avgs[_key(end_time, 'med')] = str(mma[1])
        # trail moving average - use smooth moving avg over 200 periods
        # tma = tma_prices.rolling(settings.TMA_PERIODS).mean()[-2:].reset_index(drop=True)
        # new_avgs[_key(end_time, 'trail')] = str(tma[1])
        self.memcache.set_many(new_avgs)

        # try to get the past moving averages we want from the cache
        last_time = end_time - self.step_size
        avg_names = ['fast', 'med']  # 'trail'
        averages = self.memcache.get_many(
            [_key(last_time, avg) for avg in avg_names])
        if _key(last_time, 'fast') not in averages:
            logger.info(
                "Don't have a previous value for averages, can't calculate yet."
            )
            return 0
        else:
            # values come back from the cache as strings, so cast to float
            # before doing arithmetic on the series
            fma[0] = float(averages[_key(last_time, 'fast')])
            mma[0] = float(averages[_key(last_time, 'med')])
            # tma[0] = float(averages[_key(last_time, 'trail')])

        # get the sign of the differences of the last two iterations
        last_steps_diff = fma - mma
        last_steps_sign = last_steps_diff > 0

        # determine if avgs have crossed by whether differences' signs have changed
        crossed = last_steps_sign[0] != last_steps_sign[1]
        analysis_finished = datetime.utcnow()

        # log some time analysis and status
        logger.info("Timing -- Get prices: %s, Analysis: %s, Total: %s" %
                    (lookups_finished - begin_time, analysis_finished -
                     lookups_finished, analysis_finished - begin_time))
        logger.info(
            "Moving averages --\nfma:\n%s\nmma:\n%s\ndiffs:\n%s\npos:\n%s\ncrossed: %s"
            % (fma, mma, last_steps_diff, last_steps_sign, crossed))

        # return the direction in which the cross occurred or 0 if nada
        return 0 if not crossed else (-1 if not last_steps_sign[1] else 1)
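For intuition, here is the cross test in isolation with made-up numbers (the values are hypothetical; the logic mirrors the method above): the flag flips exactly when the sign of fast minus medium differs between the two steps.

# Illustrative sketch of the crossover test with hypothetical prices.
import pandas

fma = pandas.Series([100.0, 103.0])  # fast MA at t-1 and t
mma = pandas.Series([102.0, 101.0])  # medium MA at t-1 and t
sign = (fma - mma) > 0               # [False, True]
crossed = sign[0] != sign[1]         # True: the averages crossed
direction = 0 if not crossed else (-1 if not sign[1] else 1)
print(direction)                     # 1 -> fast crossed above medium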

    def get_prices(self, end, steps, binsize):
        """Pull closing prices from the BitMex API and/or memcached in bulk."""
        _key = lambda t: self.cache_key(
            t, settings.SYMBOL + '-closeprice-' + settings.AGGRO)
        start_dt = end - steps * self.step_size
        # Create a list of all the prices which need to be gotten, then try to pull from cache
        needed_price_times = [
            start_dt + (self.step_size * i) for i in range(steps)
        ]
        prices = self.memcache.get_many([_key(t) for t in needed_price_times])
        # check for which price data came back from the cache
        cache_hits = 0
        for price_time in needed_price_times:
            key = _key(price_time)
            # at the point where we find a key missing, bail out so we can get the rest from the api
            if key not in prices:
                start_dt = price_time
                break
            prices[key] = float(prices[key])
            cache_hits += 1
        logger.info("Got %d close price(s) from the cache." % cache_hits)
        if cache_hits < steps:
            # get the remainder of the data from the API
            history = self.exchange.get_trades(binsize=binsize,
                                               count=(steps - cache_hits),
                                               start=start_dt)
            new_prices = {}
            float_prices = {}
            # prep new prices to add to the cache and to the set for analysis
            for trade in history:
                key = _key(trade['timestamp'])
                new_prices[key] = str(trade['close'])
                float_prices[key] = float(trade['close'])
            # add these new prices to the cache
            self.memcache.set_many(new_prices)
            logger.info("Cached %d new price(s)" % (len(new_prices)))
            # merge new prices with cache-hit prices
            prices = {**prices, **float_prices}
        # Should get back the right number of prices
        if len(prices) != steps:
            raise Exception("Expected to find %d prices but got %d in %s" %
                            (steps, len(prices), prices))
        # Prices should not include None
        if None in prices.values():
            raise Exception("Got None for one of the prices in %s" % prices)
        return pandas.Series(data=list(prices.values()))
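Stripped of the price-specific details, get_prices follows a bulk read-through caching shape. A minimal sketch of that pattern (fetch_from_source is a hypothetical stand-in for the exchange API call):

# Generic bulk read-through: serve from cache, fill misses from the source.
def read_through(client, keys, fetch_from_source):
    cached = client.get_many(keys)
    missing = [k for k in keys if k not in cached]
    if missing:
        fresh = fetch_from_source(missing)  # e.g. an API call
        client.set_many(fresh)              # warm the cache for next time
        cached.update(fresh)
    return [cached[k] for k in keys]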

    def cache_key(self, t=None, datatype=None):
        # Default arguments are evaluated once, at definition time, so the
        # "current" time must be taken inside the call, not in the signature.
        if t is None:
            t = datetime.utcnow()
        if datatype is None:
            datatype = settings.SYMBOL + '-price-' + settings.AGGRO
        if isinstance(t, str):
            timestamp = t
        elif isinstance(t, datetime):
            timestamp = t.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
        else:
            raise Exception(
                "The cache key time t must be a string or datetime.")
        key = "%s-%s" % (timestamp, datatype)
        return key
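A quick standalone demonstration of the pitfall fixed above (stamped is a hypothetical helper, not part of the bot):

# The default is frozen when the function is defined, not per call.
from datetime import datetime
import time

def stamped(t=datetime.utcnow()):  # evaluated once, at definition time
    return t

a = stamped()
time.sleep(1)
b = stamped()
assert a == b  # both calls reuse the same frozen default value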

    def print_status(self):
        """Print the current MM status."""

        margin = self.exchange.get_margin()
        position = self.exchange.get_position()
        self.running_qty = self.exchange.get_delta()
        tickLog = self.exchange.get_instrument()['tickLog']
        self.start_XBt = margin["marginBalance"]

        logger.info("Current XBT Balance: %.6f" % XBt_to_XBT(self.start_XBt))
        logger.info("Current Contract Position: %d" % self.running_qty)
        logger.info("Current Potential ROE: %.2f%%" % self.potential_roe())
        if settings.CHECK_POSITION_LIMITS:
            logger.info("Position limits: %d/%d" %
                        (settings.MIN_POSITION, settings.MAX_POSITION))
        if position['currentQty'] != 0:
            logger.info("Avg Cost Price: %.*f" %
                        (tickLog, float(position['avgCostPrice'])))
            logger.info("Avg Entry Price: %.*f" %
                        (tickLog, float(position['avgEntryPrice'])))
        logger.info("Contracts Traded This Run: %d" %
                    (self.running_qty - self.starting_qty))
        logger.info("Total Contract Delta: %.4f XBT" %
                    self.exchange.calc_delta()['spot'])

    def get_ticker(self):
        ticker = self.exchange.get_ticker()
        tickLog = self.exchange.get_instrument()['tickLog']

        # Set up our buy & sell positions as the smallest possible unit above and below the current spread
        # and we'll work out from there. That way we always have the best price but we don't kill wide
        # and potentially profitable spreads.
        self.start_position_buy = ticker["buy"] + self.instrument['tickSize']
        self.start_position_sell = ticker["sell"] - self.instrument['tickSize']

        # If we're maintaining spreads and we already have orders in place,
        # make sure they're not ours. If they are, we need to adjust, otherwise we'll
        # just work the orders inward until they collide.
        if settings.MAINTAIN_SPREADS:
            if ticker['buy'] == self.exchange.get_highest_buy()['price']:
                self.start_position_buy = ticker["buy"]
            if ticker['sell'] == self.exchange.get_lowest_sell()['price']:
                self.start_position_sell = ticker["sell"]

        # Back off if our spread is too small.
        if self.start_position_buy * (
                1.00 + settings.MIN_SPREAD) > self.start_position_sell:
            self.start_position_buy *= (1.00 - (settings.MIN_SPREAD / 2))
            self.start_position_sell *= (1.00 + (settings.MIN_SPREAD / 2))

        # Midpoint, used for simpler order placement.
        self.start_position_mid = ticker["mid"]
        logger.info("%s Ticker: Buy: %.*f, Sell: %.*f" %
                    (self.instrument['symbol'], tickLog, ticker["buy"],
                     tickLog, ticker["sell"]))
        logger.info(
            'Start Positions: Buy: %.*f, Sell: %.*f, Mid: %.*f' %
            (tickLog, self.start_position_buy, tickLog,
             self.start_position_sell, tickLog, self.start_position_mid))
        self.ticks.append(self.start_position_mid)
        return ticker

    def get_price_offset(self, index):
        """Given an index (1, -1, 2, -2, etc.) return the price for that side of the book.
           Negative is a buy, positive is a sell."""
        # Maintain existing spreads for max profit
        if settings.MAINTAIN_SPREADS:
            start_position = self.start_position_buy if index < 0 else self.start_position_sell
            # First positions (index 1, -1) should start right at start_position, others should branch from there
            index = index + 1 if index < 0 else index - 1
        else:
            # Offset mode: ticker comes from a reference exchange and we define an offset.
            start_position = self.start_position_buy if index < 0 else self.start_position_sell

            # If we're attempting to sell, but our sell price is actually lower than the buy,
            # move over to the sell side.
            if index > 0 and start_position < self.start_position_buy:
                start_position = self.start_position_sell
            # Same for buys.
            if index < 0 and start_position > self.start_position_sell:
                start_position = self.start_position_buy

        return math.toNearest(start_position * (1 + settings.INTERVAL)**index,
                              self.instrument['tickSize'])
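A worked example of the offset formula with illustrative numbers (plain Python, before snapping to the instrument's tick size):

# Hypothetical start_position_sell = 100.0 and INTERVAL = 0.005.
start_position = 100.0
INTERVAL = 0.005
for index in (1, 2, 3):
    shifted = index - 1  # MAINTAIN_SPREADS: first sell sits at start_position
    print(round(start_position * (1 + INTERVAL) ** shifted, 2))
# 100.0, 100.5, 101.0 -- each level steps out by INTERVAL from the last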

    ###
    # Orders
    ###

    def place_orders(self):
        """Create order items for use in convergence."""
        cross = self.analyze_history()
        logger.info("Cross: %d" % cross)

        # Create orders from the outside in. This is intentional - let's say the inner order gets taken;
        # then we match orders from the outside in, ensuring the fewest number of orders are amended and only
        # a new order is created in the inside. If we did it inside-out, all orders would be amended
        # down and a new order would be created at the outside.
        buy_orders = []
        sell_orders = []
        if cross > 0:
            # if the moving averages have crossed upward, create orders. If we're playing long open new Limit
            # MarkPrice buy orders. If not, then create Limit Close sell orders.
            for i in reversed(range(1, settings.ORDER_PAIRS + 1)):
                if not self.long_position_limit_exceeded():
                    buy_orders.append(
                        self.prepare_order(-i, settings.BIAS == 'Short'))
        elif cross < 0:
            # if the moving averages have crossed downward, create orders. When playing short open new Limit
            # MarkPrice sell orders. Otherwise, create Limit Close buy orders.
            for i in reversed(range(1, settings.ORDER_PAIRS + 1)):
                if not self.short_position_limit_exceeded():
                    sell_orders.append(
                        self.prepare_order(i, settings.BIAS == 'Long'))

        if cross != 0:
            return self.converge_orders(buy_orders, sell_orders)

        roe = self.potential_roe()
        if roe <= settings.MIN_ROE:
            # We have gone below our minimum ROE which means we need to cut our losses. Close open
            # position at market price.
            delta = self.exchange.get_delta()
            index = delta / abs(delta)
            order = self.prepare_order(index, close=True)
            order['orderQty'] = delta
            order['price'] = self.start_position_mid
            logger.info(
                "ROE %.2f%% dropped below minimum %.2f%%, time to bail out. order: %s"
                % (roe, settings.MIN_ROE, order))
            self.exchange.create_bulk_orders([order])

    def prepare_order(self, index, close=False):
        """Create an order object."""

        if settings.RANDOM_ORDER_SIZE:
            quantity = random.randint(settings.MIN_ORDER_SIZE,
                                      settings.MAX_ORDER_SIZE)
        else:
            quantity = settings.ORDER_START_SIZE + (
                (abs(index) - 1) * settings.ORDER_STEP_SIZE)

        price = self.get_price_offset(index)
        order = {
            'price': price,
            'orderQty': quantity,
            'side': "Buy" if index < 0 else "Sell"
        }
        if close:
            order['execInst'] = 'Close'

        return order

    def converge_orders(self, buy_orders, sell_orders):
        """Converge the orders we currently have in the book with what we want to be in the book.
           This involves amending any open orders and creating new ones if any have filled completely.
           We start from the closest orders outward."""

        tickLog = self.exchange.get_instrument()['tickLog']
        to_amend = []
        to_create = []
        to_cancel = []
        buys_matched = 0
        sells_matched = 0
        existing_orders = self.exchange.get_orders()

        # Check all existing orders and match them up with what we want to place.
        # If there's an open one, we might be able to amend it to fit what we want.
        for order in existing_orders:
            try:
                if order['side'] == 'Buy':
                    desired_order = buy_orders[buys_matched]
                    buys_matched += 1
                else:
                    desired_order = sell_orders[sells_matched]
                    sells_matched += 1

                # Found an existing order. Do we need to amend it?
                if desired_order['orderQty'] != order['leavesQty'] or (
                        # If price has changed, and the change is more than our RELIST_INTERVAL, amend.
                        desired_order['price'] != order['price']
                        and abs((desired_order['price'] / order['price']) - 1)
                        > settings.RELIST_INTERVAL):
                    to_amend.append({
                        'orderID': order['orderID'],
                        'orderQty': order['cumQty'] + desired_order['orderQty'],
                        'price': desired_order['price'],
                        'side': order['side']
                    })
            except IndexError:
                # Will throw if there isn't a desired order to match. In that case, cancel it.
                to_cancel.append(order)

        while buys_matched < len(buy_orders):
            to_create.append(buy_orders[buys_matched])
            buys_matched += 1

        while sells_matched < len(sell_orders):
            to_create.append(sell_orders[sells_matched])
            sells_matched += 1

        if len(to_amend) > 0:
            for amended_order in reversed(to_amend):
                reference_order = [
                    o for o in existing_orders
                    if o['orderID'] == amended_order['orderID']
                ][0]
                logger.info(
                    "Amending %4s: %d @ %.*f to %d @ %.*f (%+.*f)" %
                    (amended_order['side'], reference_order['leavesQty'],
                     tickLog, reference_order['price'],
                     (amended_order['orderQty'] - reference_order['cumQty']),
                     tickLog, amended_order['price'], tickLog,
                     (amended_order['price'] - reference_order['price'])))
            # This can fail if an order has closed in the time we were processing.
            # The API will send us `invalid ordStatus`, which means that the order's status (Filled/Canceled)
            # made it not amendable.
            # If that happens, we need to catch it and re-tick.
            try:
                self.exchange.amend_bulk_orders(to_amend)
            except requests.exceptions.HTTPError as e:
                errorObj = e.response.json()
                if errorObj['error']['message'] == 'Invalid ordStatus':
                    logger.warning(
                        "Amending failed. Waiting for order data to converge and retrying."
                    )
                    sleep(0.5)
                    return self.place_orders()
                else:
                    logger.error("Unknown error on amend: %s. Exiting" %
                                 errorObj)
                    sys.exit(1)

        if len(to_create) > 0:
            logger.info("Creating %d orders:" % (len(to_create)))
            for order in reversed(to_create):
                logger.info("%4s %d @ %.*f" %
                            (order['side'], order['orderQty'], tickLog,
                             order['price']))
            self.exchange.create_bulk_orders(to_create)

        # Could happen if we exceed a delta limit
        if len(to_cancel) > 0:
            logger.info("Canceling %d orders:" % (len(to_cancel)))
            for order in reversed(to_cancel):
                logger.info("%4s %d @ %.*f" %
                            (order['side'], order['leavesQty'], tickLog,
                             order['price']))
            self.exchange.cancel_bulk_orders(to_cancel)

    ###
    # Position Limits
    ###

    def potential_roe(self):
        "Returns the potential return on equity of open positions"
        position = self.exchange.get_position()
        roe = position['unrealisedRoePcnt'] * 100.0
        # roe = (open position / current market price) * leverage
        return roe

    def short_position_limit_exceeded(self):
        "Returns True if the short position limit is exceeded"
        if not settings.CHECK_POSITION_LIMITS:
            return False
        position = self.exchange.get_delta()
        return position <= settings.MIN_POSITION

    def long_position_limit_exceeded(self):
        "Returns True if the long position limit is exceeded"
        if not settings.CHECK_POSITION_LIMITS:
            return False
        position = self.exchange.get_delta()
        return position >= settings.MAX_POSITION

    ###
    # Sanity
    ###

    def sanity_check(self):
        """Perform checks before placing orders."""

        # Check if OB is empty - if so, can't quote.
        self.exchange.check_if_orderbook_empty()

        # Ensure market is still open.
        self.exchange.check_market_open()

        # Get ticker, which sets price offsets and prints some debugging info.
        ticker = self.get_ticker()

        # Sanity check:
        if (self.get_price_offset(-1) >= ticker["sell"]
                or self.get_price_offset(1) <= ticker["buy"]):
            logger.error("Buy: %s, Sell: %s" %
                         (self.start_position_buy, self.start_position_sell))
            logger.error(
                "First buy position: %s\nBitMEX Best Ask: %s\nFirst sell position: %s\nBitMEX Best Bid: %s"
                % (self.get_price_offset(-1), ticker["sell"],
                   self.get_price_offset(1), ticker["buy"]))
            logger.error("Sanity check failed, exchange data is inconsistent")
            self.exit()

        # Messaging if the position limits are reached
        if self.long_position_limit_exceeded():
            logger.info("Long delta limit exceeded")
            logger.info("Current Position: %.f, Maximum Position: %.f" %
                        (self.exchange.get_delta(), settings.MAX_POSITION))

        if self.short_position_limit_exceeded():
            logger.info("Short delta limit exceeded")
            logger.info("Current Position: %.f, Minimum Position: %.f" %
                        (self.exchange.get_delta(), settings.MIN_POSITION))

    ###
    # Running
    ###

    def check_file_change(self):
        """Restart if any files we're watching have changed."""
        for f, mtime in watched_files_mtimes:
            if getmtime(f) > mtime:
                self.restart()

    def check_connection(self):
        """Ensure the WS connections are still open."""
        return self.exchange.is_open()

    def exit(self):
        logger.info("Shutting down. All open orders will be cancelled.")
        try:
            self.exchange.cancel_all_orders()
            self.exchange.bitmex.exit()
        except errors.AuthenticationError as e:
            logger.info("Was not authenticated; could not cancel orders.")
        except Exception as e:
            logger.info("Unable to cancel orders: %s" % e)

        sys.exit()

    def wait_until_next_check(self):
        """Wait until the exact next time to run."""
        # determine the next time to run based on aggression setting
        now = datetime.now()
        next_run_time = math.snap_time(now + self.step_size, settings.AGGRO)
        # find time remaining in whole seconds (int() rounds down)
        remaining = int((next_run_time - now).total_seconds())
        logger.info("Should run next at %s which is in %d seconds" %
                    (next_run_time, remaining))
        # wait until almost time - there will be less than a second left
        sleep(remaining)
        # wait until we get to the exact time we need to run next
        while datetime.now() <= next_run_time:
            pass

    def run_loop(self):
        while True:
            sys.stdout.write("-----\n")
            sys.stdout.flush()

            self.check_file_change()
            self.wait_until_next_check()

            # This will restart on very short downtime, but if it's longer,
            # the MM will crash entirely as it is unable to connect to the WS on boot.
            if not self.check_connection():
                logger.error(
                    "Realtime data connection unexpectedly closed, restarting."
                )
                self.restart()

            self.sanity_check()   # Ensures health of mm - several cut-out points here
            self.print_status()   # Print skew, delta, etc
            self.place_orders()   # Creates desired orders and converges to existing orders

    def restart(self):
        logger.info("Restarting the market maker...")
        os.execv(sys.executable, [sys.executable] + sys.argv)
Example 6
class FileCache(object):
    """
    + Your library should be small and self contained.
    + Your library should use pymemcache or similar memcached client, along
      with the Python standard library, and any other resources.
    + Your library should accept any file size from 0 to 50MB. Files larger
      than 50MB should be rejected.
    + Your library should accept a file, chunk it, and store as bytes in
      Memcached with a minimum amount of overhead.
    + Your library should retrieve a file's chunks from Memcached and return a
      single stream of bytes.
    + Your library may chunk the file in any way appropriate.
    + Your library can key the chunks in any way appropriate.
    + Your library should check for file consistency to ensure the data
      retrieved is the same as the original data stored.
    + Your library should handle edge cases appropriately by raising an
      Exception or similar. Some examples of edge cases may include: trying to
      store a file that already exists, trying to retrieve a file that does
      not exist, or when a file retrieved is inconsistent/corrupt.
    - Your library should have at least two tests.
    """
    client = None

    def __init__(self, host='', port=0):
        """
        Initialize a FileCache object and connect the client via pymemcache
        :param host: Memcached server
        :param port: Memcached port
        """
        if not host:
            host = CacheDefaults.HOST.value
        if not port:
            port = CacheDefaults.PORT.value

        self.client = Client((host, port))

    def store(self, key, path_to_file) -> bool:
        """
        Store a file with a key

        :param key: Name of key to store in cache
        :param path_to_file: Path of file to store in cache
        :return: boolean True on success or raise exception
        """

        # ensure that key is provided
        if not key:
            raise FileCacheStoreException('Key not provided')

        # ensure that file exists
        if not os.path.exists(path_to_file):
            raise FileCacheStoreException('File does not exist')

        # ensure that file size is not too large
        if os.path.getsize(path_to_file) > CacheDefaults.MAX_FILE_SIZE.value:
            raise FileCacheStoreException('File size too large')

        # ensure that file isn't already cached
        chksum_in = get_md5_checksum(path_to_file)
        chksum_key = CacheDefaults.KEY_CHECKSUM.value.format(chksum_in)
        chksum_cached = self.client.get(chksum_key)

        if chksum_cached is not None:
            raise FileCacheStoreException('File is already cached')
        self.client.set(chksum_key, 1)

        # store count of chunks so that you can use it to reconstitute keys
        chunks = file_as_chunks(path_to_file, CacheDefaults.CHUNK_SIZE.value)
        self.client.set(CacheDefaults.KEY_NUMCHUNKS.value.format(key),
                        len(chunks))

        # iterate over each chunk and cache that piece
        for k, curr_chunk in enumerate(chunks):
            curr_key = CacheDefaults.KEY_CHUNK.value.format(key, k)
            self.client.set(curr_key, curr_chunk)

        return True

    def retrieve(self, key, path_to_outfile) -> bytes:
        """
        Retrieve a file's chunks from the cache, reconstitute them in order,
        write the result to path_to_outfile, and return the bytes.
        """
        # ensure that key is provided
        if not key:
            raise FileCacheRetrieveException('Key not provided')

        # rebuild the ordered list of chunk keys from the stored chunk count
        key_chunk_count = CacheDefaults.KEY_NUMCHUNKS.value.format(key)
        num_chunks = self.client.get(key_chunk_count)
        if num_chunks is None:
            raise FileCacheRetrieveException('No chunks for key provided')
        list_of_chunks = [
            CacheDefaults.KEY_CHUNK.value.format(key, j)
            for j in range(int(num_chunks))
        ]
        chunks = self.client.get_many(list_of_chunks)
        # join in key order so the chunks reassemble in their original sequence
        ret_bytes = b''.join(chunks[c] for c in list_of_chunks)

        with open(path_to_outfile, 'wb') as outfile:
            outfile.write(ret_bytes)
            chksum_written = get_md5_checksum(path_to_outfile)
            chksum_k = CacheDefaults.KEY_CHECKSUM.value.format(chksum_written)

            if self.client.get(chksum_k) is not None:
                return ret_bytes
            else:
                raise FileCacheRetrieveException('Checksums do not match')
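The snippet leans on a CacheDefaults enum, two exception types, and two helpers (get_md5_checksum, file_as_chunks) that are not shown. A purely illustrative reconstruction, inferred from how they are used above; the original module's actual key formats and defaults may differ:

# Hypothetical supporting definitions, inferred from usage above.
import hashlib
from enum import Enum


class CacheDefaults(Enum):
    HOST = 'localhost'
    PORT = 11211
    MAX_FILE_SIZE = 50 * 1024 * 1024           # reject files over 50MB
    CHUNK_SIZE = 1000000
    KEY_CHECKSUM = 'filecache:md5:{}'          # keyed by file checksum
    KEY_NUMCHUNKS = 'filecache:{}:numchunks'   # keyed by file key
    KEY_CHUNK = 'filecache:{}:chunk:{}'        # keyed by file key + index


class FileCacheStoreException(Exception):
    pass


class FileCacheRetrieveException(Exception):
    pass


def get_md5_checksum(path):
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()


def file_as_chunks(path, chunk_size):
    with open(path, 'rb') as f:
        return list(iter(lambda: f.read(chunk_size), b''))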
Example 7
class StandardServerConfigTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print "PLEASE MAKE SURE THE SERVER IS NOT ALREADY RUNNING!"
        print "Note: this test run will start and stop the server for each test case, this may take a minute or two...\n"

    def setUp(self):
        self.startServer()
        self.client = Client(('localhost', 11211))

    def tearDown(self):
        self.killServer()

    def startServer(self):
        FNULL = open(os.devnull, 'w')
        path = os.path.dirname(os.path.realpath(__file__))
        proc = subprocess.Popen(["java", "-jar", path+"/../bin/mcsvr.jar"],
                                stdout=FNULL, stderr=subprocess.STDOUT)
        self.server_pid = proc.pid
        time.sleep(3)

    def killServer(self):
        time.sleep(1)
        os.kill(int(self.server_pid), signal.SIGTERM)

    def testGetSingle(self):
        self.client.set('first_key', 'first_value', 0, False)
        res = self.client.get('first_key')
        self.assertEqual(res, "first_value")

    def testGetMany(self):
        self.client.set('first_key', 'first_value', 0, False)
        self.client.set('second_key', 'second_value', 0, True)
        res = self.client.get_many(['first_key', 'second_key'])
        self.assertEqual(res["first_key"], "first_value")
        self.assertEqual(res["second_key"], "second_value")

    def testGetExpired(self):
        self.client.set('first_key', 'first_value', 0, False)
        self.client.set('first_key', 'first_value', 2, False)
        self.client.get_many(['first_key', 'second_key'])
        time.sleep(3)
        self.assertEqual(None, self.client.get('first_key'))

    def testGetDeleted(self):
        self.client.set('first_key', 'first_value', 0, False)
        res = self.client.get('first_key')
        self.assertEqual('first_value', res)
        self.client.delete('first_key')
        res = self.client.get('first_key')
        self.assertEqual(None, res)

    def testUpdateExpired(self):
        self.client.set('first_key', 'first_value', 2, False)
        time.sleep(3)
        self.client.set('first_key', 'second_value', 0, False)
        res = self.client.get('first_key')
        self.assertEqual('second_value', res)

    def testCaseUpdateOk(self):
        self.client.set('first_key', 'first_value', 0, False)
        res = self.client.gets('first_key')
        res = self.client.cas('first_key', 'second_value', res[1], 0, False)
        self.assertTrue(res)
        res = self.client.get('first_key')
        self.assertEqual('second_value', res)

    def testCaseUpdateInvalidUniq(self):
        self.client.set('first_key', 'first_value', 0, False)
        res = self.client.gets('first_key')
        casUniq = res[1]
        self.client.set('first_key', 'second_value', 0, False)
        res = self.client.cas('first_key', 'third_value', casUniq, 0, False)
        self.assertFalse(res)
        res = self.client.get('first_key')
        self.assertEqual('second_value', res)

    def testCaseUpdateNotOkOnExpired(self):
        self.client.set('first_key', 'first_value', 2, False)
        res = self.client.gets('first_key')
        time.sleep(3)
        res = self.client.cas('first_key', 'second_value', res[1], 0, False)
        self.assertEqual(None, res)

    def testCaseUpdateNotOkOnMissing(self):
        self.client.set('first_key', 'first_value', 0, False)
        res = self.client.gets('first_key')
        casUniq = res[1]
        self.client.delete('first_key')
        res = self.client.cas('first_key', 'second_value', casUniq, 0, False)
        self.assertEqual(None, res)
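A note on the cas return values the last two tests rely on: with pymemcache, cas() returns True when the value is stored, False when the cas token is stale because the key changed, and None when the key does not exist, which is why the expired and deleted cases assert None. In sketch form (key and values are illustrative):

# cas() return values, as exercised by the tests above.
client.set('k', 'v1', 0, False)
value, token = client.gets('k')         # value plus its cas token
client.cas('k', 'v2', token, 0, False)  # -> True: token still current
client.cas('k', 'v3', token, 0, False)  # -> False: token is now stale
client.delete('k')
client.cas('k', 'v4', token, 0, False)  # -> None: key no longer exists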