Example 1
 def __init__(self, nonceRange, core, vectors, aggression):
     # Prepare some raw data, converting it into the form that the OpenCL
     # function expects.
     data = np.array(
            unpack('IIII', nonceRange.unit.data[64:]), dtype=np.uint32)
     
     # Vectors do twice the work per execution, so calculate accordingly...
     rateDivisor = 2 if vectors else 1
     
     # get the number of iterations from the aggression and size
     self.iterations = int(nonceRange.size / (1 << aggression))
     self.iterations = max(1, self.iterations)
     
     #set the size to pass to the kernel based on iterations and vectors
     self.size = (nonceRange.size / rateDivisor) / self.iterations
     
     #compute bases for each iteration
     self.base = [None] * self.iterations
     for i in range(self.iterations):
         self.base[i] = pack('I',
             (nonceRange.base/rateDivisor) + (i * self.size))
     
     #set up state and precalculated static data
     self.state = np.array(
         unpack('IIIIIIII', nonceRange.unit.midstate), dtype=np.uint32)
     self.state2 = np.array(unpack('IIIIIIII',
         calculateMidstate(nonceRange.unit.data[64:80] +
             '\x00\x00\x00\x80' + '\x00'*40 + '\x80\x02\x00\x00',
             nonceRange.unit.midstate, 3)), dtype=np.uint32)
     self.state2 = np.array(
         list(self.state2)[3:] + list(self.state2)[:3], dtype=np.uint32)
     self.nr = nonceRange
     
     self.f = np.zeros(8, np.uint32)
     self.calculateF(data)
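
The constructor is mostly bookkeeping: it splits the assigned nonce range into `iterations` kernel launches of `size` work-items each and precomputes a packed base nonce per launch. A minimal, self-contained sketch of that arithmetic, using made-up numbers in place of the real NonceRange object:

    from struct import pack

    # Hypothetical stand-in values; in the kernel these come from the
    # NonceRange handed to the constructor.
    range_base = 0x10000000      # first nonce of the assigned range
    range_size = 1 << 24         # number of nonces in the range
    aggression = 6               # higher aggression -> fewer, larger launches
    rate_divisor = 2             # a vectorized kernel covers two nonces per work-item

    # Same arithmetic as above, with explicit integer division.
    iterations = max(1, range_size // (1 << aggression))
    size = (range_size // rate_divisor) // iterations

    # One packed 32-bit base nonce per iteration (little-endian, matching what
    # the native pack('I', ...) above produces on x86).
    bases = [pack('<I', range_base // rate_divisor + i * size)
             for i in range(iterations)]

    print(iterations, size, len(bases))   # 262144 32 262144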
Example 2
 def storeWork(self, wu):
     
     #check if this work matches the previous block
     if self.lastBlock is not None and (wu.data[4:36] == self.lastBlock):
         self.logger.reportDebug('Server gave work from the previous '
                                 'block, ignoring.')
         #if the queue is too short request more work
         if len(self.queue) < self.queueSize:
             self.miner.connection.requestWork()
         return
     
     #create a WorkUnit
     work = WorkUnit()
     work.data = wu.data
     work.target = wu.target
     work.midstate = calculateMidstate(work.data[:64])
     work.nonces = 2 ** wu.mask
     work.base = 0
     
     #check if there is a new block, if so reset queue
     newBlock = (wu.data[4:36] != self.block)
     if newBlock:
         self.queue.clear()
         self.currentUnit = None
         self.lastBlock = self.block
         self.block = wu.data[4:36]
         self.logger.reportDebug("New block (WorkQueue)")
     
     #clear the idle flag since we just added work to queue
     self.idle = False
     self.miner.reportIdle(False)
     
     #add new WorkUnit to queue
     if work.data and work.target and work.midstate and work.nonces:
         self.queue.append(work)
     
     #if the queue is too short request more work
     if len(self.queue) < self.queueSize:
         self.miner.connection.requestWork()
     
     #if there is a new block notify kernels that their work is now stale
     if newBlock:
         for callback in self.staleCallbacks:
             callback()
     
     #check if there are deferred NonceRange requests pending
     #since requests to fetch a NonceRange can add additional deferreds to
     #the queue, cache the size beforehand to avoid infinite loops.
     for i in range(len(self.deferredQueue)):
         df, size = self.deferredQueue.popleft()
         d = self.fetchRange(size)
         d.chainDeferred(df)
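
The `wu.data[4:36]` comparisons work because bytes 4 through 35 of the block header are the previous-block hash, so two work units share that slice exactly when they build on the same chain tip. A small illustration with hypothetical header bytes (real data comes from the getwork response):

    # Hypothetical header-like strings, laid out as version + prev hash + rest.
    version = b'\x01\x00\x00\x00'
    tip_a = b'\xaa' * 32                   # previous-block hash, 32 bytes
    tip_b = b'\xbb' * 32
    rest = b'\x00' * 44                    # merkle root, time, bits, nonce

    work_1 = version + tip_a + rest
    work_2 = version + tip_a + rest
    work_3 = version + tip_b + rest

    print(work_1[4:36] == work_2[4:36])    # True  -> same block, keep the queue
    print(work_1[4:36] == work_3[4:36])    # False -> new block, flush stale work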
Example 3
 def callback(wu):
     work = self.assignedWork.setdefault(account.id, list())
     work.append(wu)
     
     padding = '00000080' + '00000000'*10 + '80020000'
     hash1 = '00000000'*8 + '00000080' + '00000000'*6 + '00010000'
     
     return {
             "midstate": calculateMidstate(wu.data[:64]).encode('hex'),
             "data": wu.data.encode('hex') + padding,
             "hash1": hash1,
             "target": wu.target.encode('hex'),
             "mask": wu.mask
            }
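
The hard-coded `padding` string is just SHA-256 padding for an 80-byte message, stored with getwork's per-32-bit-word byte swap: a 0x80 terminator, zero fill, and a 640-bit length field, bringing the header out to two 64-byte blocks. A quick standard-library check of that reading:

    from binascii import unhexlify
    from struct import unpack

    padding = '00000080' + '00000000' * 10 + '80020000'
    raw = unhexlify(padding)
    print(len(raw))                          # 48 -> 80 + 48 = 128 bytes, two SHA-256 blocks

    # Undo the per-word byte swap to recover standard SHA-256 padding.
    unswapped = b''.join(raw[i:i + 4][::-1] for i in range(0, len(raw), 4))
    print(unswapped[0:1] == b'\x80')         # True: the 0x80 terminator
    print(unpack('>Q', unswapped[-8:])[0])   # 640 = 80 bytes * 8 bits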
Example 4
    def __init__(self, nonceRange, core, rateDivisor, aggression):
        # Prepare some raw data, converting it into the form that the OpenCL
        # function expects.
        data = np.array(unpack('IIII', nonceRange.unit.data[64:]),
                        dtype=np.uint32)

        # get the number of iterations from the aggression and size
        self.iterations = int(nonceRange.size / (1 << aggression))
        self.iterations = max(1, self.iterations)

        #set the size to pass to the kernel based on iterations and vectors
        self.size = (nonceRange.size / rateDivisor) / self.iterations
        self.totalsize = nonceRange.size
        #compute bases for each iteration

        self.base = [None] * self.iterations
        for i in range(self.iterations):
            if rateDivisor == 1:
                self.base[i] = pack('I', ((nonceRange.base) +
                                          (i * self.size * rateDivisor)))
            if rateDivisor == 2:
                self.base[i] = pack(
                    'II', ((nonceRange.base) + (i * self.size * rateDivisor)),
                    (1 + (nonceRange.base) + (i * self.size * rateDivisor)))
            if rateDivisor == 4:
                self.base[i] = pack(
                    'IIII',
                    ((nonceRange.base) + (i * self.size * rateDivisor)),
                    (1 + (nonceRange.base) + (i * self.size * rateDivisor)),
                    (2 + (nonceRange.base) + (i * self.size * rateDivisor)),
                    (3 + (nonceRange.base) + (i * self.size * rateDivisor)))
        #set up state and precalculated static data
        self.state = np.array(unpack('IIIIIIII', nonceRange.unit.midstate),
                              dtype=np.uint32)
        self.state2 = np.array(unpack(
            'IIIIIIII',
            calculateMidstate(
                nonceRange.unit.data[64:80] + '\x00\x00\x00\x80' +
                '\x00' * 40 + '\x80\x02\x00\x00', nonceRange.unit.midstate,
                3)),
                               dtype=np.uint32)
        self.state2 = np.array(list(self.state2)[3:] + list(self.state2)[:3],
                               dtype=np.uint32)
        self.nr = nonceRange

        self.f = np.zeros(9, np.uint32)
        self.calculateF(data)
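
Compared with the first constructor, the main change is that vectorized kernels need one starting nonce per vector lane, so each iteration's base is packed as a uint2 or uint4 of consecutive nonces. A tiny sketch of that packing with made-up values:

    from struct import pack, unpack

    base = 0x20000000          # hypothetical base nonce for one iteration
    rate_divisor = 4           # uint4 kernel: four nonces per work-item

    # One consecutive starting nonce per vector lane, as in the loop above.
    packed = pack('IIII', *(base + lane for lane in range(rate_divisor)))
    print(unpack('IIII', packed))   # (536870912, 536870913, 536870914, 536870915)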
Example 5
    def storeWork(self, wu):
        #check if this work matches the previous block
        if self.lastBlock is not None and wu.data[4:36] == self.lastBlock:
            self.logger.reportDebug('Server gave work from the previous '
                                    'block (I think), ignoring.')
            # sometimes an LP finishes before a normal getwork completes, and
            # we get confused about which block is "current", so unset
            # lastBlock to avoid an infinite loop
            self.lastBlock = None
            #if the queue is too short request more work
            if (self.rollntime and len(self.queue) < 1) or len(self.queue) < self.queueSize:
                self.miner.connection.requestWork()
            return

        #create a WorkUnit
        work = WorkUnit()
        work.data = wu.data
        work.target = wu.target
        work.midstate = calculateMidstate(work.data[:64])
        work.nonces = 2 ** wu.mask
        work.base = 0

        #check if there is a new block, if so reset queue
        newBlock = (wu.data[4:36] != self.block)
        if newBlock:
            self.clearQueue()
            self.currentUnit = None
            self.lastBlock = self.block
            self.block = wu.data[4:36]
            self.logger.reportDebug("New block (WorkQueue)")
        
        timeNow = time()
        receivedAt = wu.receivedAt if wu.receivedAt is not None else timeNow
        timeModifier = timeNow - receivedAt + self.miner.connection.getMaxSubmitTime() + 3
        work.expirationTime = timeNow + self.defaultWorkExpiration - timeModifier
        if (self.rollntime and
            wu.rollntime is not None and
            wu.rollntime.lower() not in ('n', 'no', 'f', 'false', '0')):
            work.rollntime = True

            if "expire=" in wu.rollntime.lower():
                try:
                    work.expirationTime = timeNow + int(wu.rollntime.split('=')[1]) - timeModifier
                except (ValueError, IndexError):
                    self.logger.log("Failed to parse expiration time. Using default.")

            work.lastRollTime = timeNow
        else:
            work.rollntime = False

        #add new WorkUnit to queue
        if work.data and work.target and work.midstate and work.nonces:
            # set workId so we can identify this work even if its data changes
            work.workId = self.lastWorkId = self.lastWorkId + 1
            self.queue.append(work)

        #clear the idle flag since we just added work to queue
        self.miner.reportIdle(False)

        #if the queue is too short request more work
        if not work.rollntime and len(self.queue) < self.queueSize:
            self.miner.connection.requestWork()
        
        #if there is a new block notify kernels that their work is now stale
        if newBlock:
            for callback in self.staleCallbacks:
                callback()
        
        #check if there are deferred NonceRange requests pending
        #since requests to fetch a NonceRange can add additional deferreds to
        #the queue, cache the size beforehand to avoid infinite loops.
        for i in range(len(self.deferredQueue)):
            df, size = self.deferredQueue.popleft()
            d = self.fetchRange(size)
            d.chainDeferred(df)

        #tell work prefetch loop that we're not currently working on getting new work
        self.requestingWork = False
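
The expiration arithmetic discounts the work's usable lifetime by the time already spent in transit plus the worst-case time needed to submit a share. A standalone sketch with hypothetical numbers (in storeWork the figures come from the connection and queue objects, e.g. getMaxSubmitTime and defaultWorkExpiration):

    from time import time

    # Hypothetical figures standing in for the real connection/queue settings.
    default_work_expiration = 120      # seconds a work unit is normally usable
    max_submit_time = 10               # worst-case share submission latency
    received_at = time() - 2.5         # when the getwork response actually arrived

    time_now = time()
    time_modifier = time_now - received_at + max_submit_time + 3
    expiration_time = time_now + default_work_expiration - time_modifier

    # An 'X-Roll-NTime: expire=NNN' style value overrides the default lifetime.
    rollntime = 'expire=60'
    if 'expire=' in rollntime.lower():
        expiration_time = time_now + int(rollntime.split('=')[1]) - time_modifier

    print(round(expiration_time - time_now, 1))   # about 44.5 seconds of usable life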