def report(self, miner, nonce, accepted):
    """Tally and log the pool's verdict on a previously submitted nonce.

    Looks up the (is_block, hash6, hash5) record stored at submit time,
    bumps the miner's accepted/rejected counter, optionally logs the
    result, and drops the record.
    """
    is_block, hash6, hash5 = self.sent[nonce]
    # share_count[1] counts accepted shares, share_count[0] rejected ones.
    if accepted:
        miner.share_count[1] += 1
    else:
        miner.share_count[0] += 1
    # Blocks are logged with the longer hash (hash6 + hash5).
    shown_hash = hash6 + hash5 if is_block else hash6
    if self.options.verbose or is_block:
        prefix = 'block ' if is_block else ''
        verdict = 'accepted' if accepted else '_rejected_'
        say_line('%s %s%s, %s', (miner.id(), prefix, shown_hash, verdict))
    del self.sent[nonce]
def report(self, miner, nonce, accepted):
    """Tally and log the pool's verdict on a previously submitted nonce.

    Looks up the (is_block, hash6, hash5) record stored at submit time,
    bumps the miner's accepted/rejected counter, optionally logs the
    result, and drops the record.
    """
    is_block, hash6, hash5 = self.sent[nonce]
    # share_count[1] counts accepted shares, share_count[0] rejected ones.
    miner.share_count[1 if accepted else 0] += 1
    # Fix: was named `hash`, shadowing the builtin; renamed to `hash_`
    # (matching the sibling implementation of this method elsewhere).
    # Blocks are logged with the longer hash (hash6 + hash5).
    hash_ = hash6 + hash5 if is_block else hash6
    if self.options.verbose or is_block:
        say_line('%s %s%s, %s', (miner.id(),
                                 'block ' if is_block else '',
                                 hash_,
                                 'accepted' if accepted else '_rejected_'))
    del self.sent[nonce]
def status_updated(self, miner):
    """Print a one-line status: hash rate, estimated rate and reject stats.

    In verbose mode the figures are per-miner (prefixed with the miner id);
    otherwise they are summed over all miners.
    """
    per_device = self.options.verbose
    if per_device:
        rate = miner.rate
        estimated_rate = miner.estimated_rate
        rejected = miner.share_count[0]
        accepted = miner.share_count[1]
    else:
        rate = sum(m.rate for m in self.miners)
        estimated_rate = sum(m.estimated_rate for m in self.miners)
        rejected = sum(m.share_count[0] for m in self.miners)
        accepted = sum(m.share_count[1] for m in self.miners)
    total = rejected + accepted
    # Avoid division by zero before any share has been submitted.
    denominator = max(total, 1)
    label = miner.id() + ' ' if per_device else ''
    say_quiet('%s[%.03f MH/s (~%d MH/s)] [Rej: %d/%d (%.02f%%)]',
              (label, rate, round(estimated_rate),
               rejected, total, float(rejected) * 100 / denominator))
def _log(self, level, message, format=None, print_it=True, display_level=True):
    """We enter a log message, we format it, and we add the log brok"""
    # Module-level logging state shared across the daemon:
    # obj receives broks, name prefixes messages, local_log toggles the
    # local logging handler, human_timestamp_log picks the date format.
    global obj
    global name
    global local_log
    global human_timestamp_log

    # ignore messages when message level is lower than Log level
    if level < self._level:
        return

    # We format the log in UTF-8
    # NOTE: Python 2 — `str` is a byte string here; decode to unicode,
    # substituting undecodable bytes instead of raising.
    if isinstance(message, str):
        message = message.decode('UTF-8', 'replace')

    if format is None:
        lvlname = logging.getLevelName(level)
        # The doubled %% survive this first interpolation so the named
        # placeholders can be filled from the `args` dict below; the
        # level prefix is only included when display_level is set.
        fmt = u'[%%(date)s] %s%%(name)s%%(msg)s\n' % (if_else(display_level, '%(level)s : ', ''))
        args = {
            # Human-readable local time or raw epoch seconds.
            'date': if_else(human_timestamp_log, time.asctime(time.localtime(time.time())), int(time.time())),
            'level': lvlname.capitalize(),
            'name': if_else(name is None, '', '[%s] ' % name),
            'msg': message
        }
        s = fmt % args
    else:
        s = format % message

    if print_it and len(s) > 1:
        # If the daemon is launched with a non UTF8 shell
        # we can have problems in printing
        try:
            print s[:-1]
        except UnicodeEncodeError:
            print s.encode('ascii', 'ignore')

    # We create and add the brok but not for debug that don't need
    # to do a brok for it, and so go in all satellites. Debug
    # should keep locally
    if level != logging.DEBUG:
        b = Brok('log', {'log': s})
        obj.add(b)

    # If we want a local log write, do it
    if local_log is not None:
        logging.log(level, s.strip())
def binary_comp(comp):
    """Emit Hack assembly for a binary comparison VM command.

    Pops two values off the stack, computes (second - top) into D, then
    writes the comparison result onto the stack and re-advances SP.
    """
    # recall: in HW -1 (all bits set) represents true, 0 false
    on_true = ['@SP', 'A=M', 'M=-1']
    on_false = ['@SP', 'A=M', 'M=0']
    pieces = [
        decr_sp(),
        load_stack_top_into_d(),
        decr_sp(),
        ['@SP', 'A=M', 'D=M-D'],
        if_else(comparator=comp, true_block=on_true, false_block=on_false),
        incr_sp(),
    ]
    return concat(*pieces)
def initialize(options):
    """Normalize OpenCL-related options and build one OpenCLMiner per
    selected GPU device; returns the (possibly empty) list of miners."""
    if not OPENCL:
        return []
    # Per-device option lists parsed from comma-separated strings.
    options.worksize = tokenize(options.worksize, 'worksize')
    options.frames = tokenize(options.frames, 'frames', [30])
    options.frameSleep = tokenize(options.frameSleep, 'frameSleep', cast=float)
    # Legacy -v forces vectors on for every device; otherwise parse --vv.
    options.vectors = if_else(
        options.old_vectors, [True],
        tokenize(options.vectors, 'vectors', [False], bool))
    platforms = cl.get_platforms()
    # Invalid platform index, or several platforms and none chosen:
    # list them and exit so the user can pick one with --platform.
    if options.platform >= len(platforms) or (options.platform == -1 and len(platforms) > 1):
        print 'Wrong platform or more than one OpenCL platforms found, use --platform to select one of the following\n'
        for i in xrange(len(platforms)):
            print '[%d]\t%s' % (i, platforms[i].name)
        sys.exit()
    if options.platform == -1:
        options.platform = 0
    devices = platforms[options.platform].get_devices()
    if not options.device and devices:
        print '\nOpenCL devices:\n'
        for i in xrange(len(devices)):
            print '[%d]\t%s' % (i, devices[i].name)
        print '\nNo devices specified, using all GPU devices\n'
    # No explicit --device: take every GPU; otherwise only listed indices.
    miners = [
        OpenCLMiner(i, options)
        for i in xrange(len(devices))
        if ((not options.device and devices[i].type == cl.device_type.GPU) or (
            i in options.device))
    ]
    for i in xrange(len(miners)):
        # min(...) clamps the index so the last listed value applies to
        # all remaining devices when fewer values than devices were given.
        miners[i].worksize = options.worksize[min(i, len(options.worksize) - 1)]
        miners[i].frames = options.frames[min(i, len(options.frames) - 1)]
        miners[i].frameSleep = options.frameSleep[min(
            i, len(options.frameSleep) - 1)]
        miners[i].vectors = options.vectors[min(i, len(options.vectors) - 1)]
        miners[i].cutoff_temp = options.cutoff_temp[min(
            i, len(options.cutoff_temp) - 1)]
        miners[i].cutoff_interval = options.cutoff_interval[min(
            i, len(options.cutoff_interval) - 1)]
    return miners
def getwork(self, data=None):
    """Perform a JSON-RPC getwork call against the current server.

    With no `data`, requests new work; with `data`, submits a solution.
    Returns the RPC 'result' payload, or None when the connection fails
    (known transport/RPC errors stop this source; anything else is
    logged and swallowed).
    """
    try:
        self.connection = self.ensure_connected(
            self.connection, self.server().proto, self.server().host)[0]
        # getwork takes a single hex-data param when submitting,
        # and an empty param list when requesting.
        if data:
            self.postdata['params'] = [data]
        else:
            self.postdata['params'] = []
        self.connection, result = self.request(
            self.connection, '/', self.headers, dumps(self.postdata))
        self.switch.connection_ok()
        return result['result']
    except (IOError, httplib.HTTPException, ValueError, socks.ProxyError,
            NotAuthorized, RPCError):
        self.stop()
    except Exception:
        say_exception()
def initialize(options):
    """Normalize OpenCL-related options and build one OpenCLMiner per
    selected GPU device; returns the (possibly empty) list of miners."""
    if not OPENCL:
        return []
    # Per-device option lists parsed from comma-separated strings.
    options.worksize = tokenize(options.worksize, 'worksize')
    options.frames = tokenize(options.frames, 'frames', [30])
    options.frameSleep = tokenize(options.frameSleep, 'frameSleep', cast=float)
    # Legacy -v forces vectors on for every device; otherwise parse --vv.
    options.vectors = if_else(options.old_vectors, [True], tokenize(options.vectors, 'vectors', [False], bool))
    platforms = cl.get_platforms()
    # Invalid platform index, or several platforms and none chosen:
    # list them and exit so the user can pick one with --platform.
    if options.platform >= len(platforms) or (options.platform == -1 and len(platforms) > 1):
        print 'Wrong platform or more than one OpenCL platforms found, use --platform to select one of the following\n'
        for i in xrange(len(platforms)):
            print '[%d]\t%s' % (i, platforms[i].name)
        sys.exit()
    if options.platform == -1:
        options.platform = 0
    devices = platforms[options.platform].get_devices()
    if not options.device and devices:
        print '\nOpenCL devices:\n'
        for i in xrange(len(devices)):
            print '[%d]\t%s' % (i, devices[i].name)
        print '\nNo devices specified, using all GPU devices\n'
    # No explicit --device: take every GPU; otherwise only listed indices.
    miners = [
        OpenCLMiner(i, options)
        for i in xrange(len(devices))
        if (
            (not options.device and devices[i].type == cl.device_type.GPU) or
            (i in options.device)
        )
    ]
    for i in xrange(len(miners)):
        # min(...) clamps the index so the last listed value applies to
        # all remaining devices when fewer values than devices were given.
        miners[i].worksize = options.worksize[min(i, len(options.worksize) - 1)]
        miners[i].frames = options.frames[min(i, len(options.frames) - 1)]
        miners[i].frameSleep = options.frameSleep[min(i, len(options.frameSleep) - 1)]
        miners[i].vectors = options.vectors[min(i, len(options.vectors) - 1)]
        miners[i].cutoff_temp = options.cutoff_temp[min(i, len(options.cutoff_temp) - 1)]
        miners[i].cutoff_interval = options.cutoff_interval[min(i, len(options.cutoff_interval) - 1)]
    return miners
def mining_thread(self):
    """Per-device mining loop.

    Pulls work items off self.work_queue, repeatedly enqueues the search
    kernel over successive slices of the nonce space, adapts the global
    work size toward one kernel run per frame, throttles on GPU
    temperature, and hands any candidate nonces to the switch.
    """
    say_line('started OpenCL miner on platform %d, device %d (%s)', (self.options.platform, self.device_index, self.device_name))
    # Vector kernels process two nonces per work-item: the rate divisor is
    # halved (500 vs 1000) and so is the nonce space per work item.
    (self.defines, rate_divisor, hashspace) = if_else(self.vectors, ('-DVECTORS', 500, 0x7FFFFFFF), ('', 1000, 0xFFFFFFFF))
    self.defines += (' -DOUTPUT_SIZE=' + str(self.output_size))
    self.defines += (' -DOUTPUT_MASK=' + str(self.output_size - 1))
    self.load_kernel()
    # Target duration of a single kernel run, in seconds.
    frame = 1.0 / max(self.frames, 3)
    unit = self.worksize * 256
    global_threads = unit * 10
    queue = cl.CommandQueue(self.context)

    last_rated_pace = last_rated = last_n_time = last_temperature = time()
    base = last_hash_rate = threads_run_pace = threads_run = 0
    # The slot at index output_size is used as a "results present" flag;
    # the kernel writes candidate nonces into the preceding slots.
    output = np.zeros(self.output_size + 1, np.uint32)
    output_buffer = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY | cl.mem_flags.USE_HOST_PTR, hostbuf=output)
    self.kernel.set_arg(20, output_buffer)

    work = None
    temperature = 0
    while True:
        if self.should_stop: return

        sleep(self.frameSleep)

        # Fetch new work when idle, or when fresher work has been queued.
        if (not work) or (not self.work_queue.empty()):
            try:
                work = self.work_queue.get(True, 1)
            except Empty: continue
            else:
                if not work: continue
                nonces_left = hashspace
                state = work.state
                state2 = work.state2
                f = work.f
                # Args 0-7: state words; 8-13: second-round state words
                # (indices 0 and 4 are recomputed by the kernel);
                # 15-19: the precomputed f values. Arg 14 is the nonce
                # base, set each frame below; arg 20 the output buffer.
                self.kernel.set_arg(0, state[0])
                self.kernel.set_arg(1, state[1])
                self.kernel.set_arg(2, state[2])
                self.kernel.set_arg(3, state[3])
                self.kernel.set_arg(4, state[4])
                self.kernel.set_arg(5, state[5])
                self.kernel.set_arg(6, state[6])
                self.kernel.set_arg(7, state[7])
                self.kernel.set_arg(8, state2[1])
                self.kernel.set_arg(9, state2[2])
                self.kernel.set_arg(10, state2[3])
                self.kernel.set_arg(11, state2[5])
                self.kernel.set_arg(12, state2[6])
                self.kernel.set_arg(13, state2[7])
                self.kernel.set_arg(15, f[0])
                self.kernel.set_arg(16, f[1])
                self.kernel.set_arg(17, f[2])
                self.kernel.set_arg(18, f[3])
                self.kernel.set_arg(19, f[4])

        if temperature < self.cutoff_temp:
            # Scan the next slice of the nonce space starting at `base`.
            self.kernel.set_arg(14, pack('I', base))
            cl.enqueue_nd_range_kernel(queue, self.kernel, (global_threads,), (self.worksize,))

            nonces_left -= global_threads
            threads_run_pace += global_threads
            threads_run += global_threads
            base = uint32(base + global_threads)
        else:
            # Too hot: skip this frame and let the device cool down.
            threads_run_pace = 0
            last_rated_pace = time()
            sleep(self.cutoff_interval)

        now = time()

        # Poll the temperature at most about once per second (or while
        # over the cutoff) when an ADL adapter is available.
        if self.adapterIndex != None:
            t = now - last_temperature
            if temperature >= self.cutoff_temp or t > 1:
                last_temperature = now
                with adl_lock:
                    temperature = self.get_temperature()

        t = now - last_rated_pace
        if t > 1:
            # Measure the short-term rate; when it drifts more than 10%
            # from the last measurement, retune global_threads (in whole
            # units) toward one kernel run per `frame` seconds.
            rate = (threads_run_pace / t) / rate_divisor
            last_rated_pace = now; threads_run_pace = 0

            r = last_hash_rate / rate
            if r < 0.9 or r > 1.1:
                global_threads = max(unit * int((rate * frame * rate_divisor) / unit), unit)
                last_hash_rate = rate

        t = now - last_rated
        if t > self.options.rate:
            self.update_rate(now, threads_run, t, work.targetQ, rate_divisor)
            last_rated = now; threads_run = 0

        queue.finish()
        cl.enqueue_read_buffer(queue, output_buffer, output)
        queue.finish()

        if output[self.output_size]:
            # The kernel set the flag slot: package the candidate nonces
            # with the work metadata and hand them to the switch, then
            # clear the output buffer on the device for the next frame.
            result = Object()
            result.header = work.header
            result.merkle_end = work.merkle_end
            result.time = work.time
            result.difficulty = work.difficulty
            result.target = work.target
            result.state = np.array(state)
            result.nonces = np.array(output)
            result.job_id = work.job_id
            result.extranonce2 = work.extranonce2
            result.server = work.server
            result.miner = self
            self.switch.put(result)
            output.fill(0)
            cl.enqueue_write_buffer(queue, output_buffer, output)

        if not self.switch.update_time:
            # Nonce space nearly exhausted: request fresh work and extend
            # the countdown; if it stays in the extended band, go idle.
            if nonces_left < 3 * global_threads * self.frames:
                self.update = True
                nonces_left += 0xFFFFFFFFFFFF
            elif 0xFFFFFFFFFFF < nonces_left < 0xFFFFFFFFFFFF:
                say_line('warning: job finished, %s is idle', self.id())
                work = None
        elif now - last_n_time > 1:
            # Roll the (byte-reversed) block time forward once per second
            # to reopen the nonce space, then refresh the kernel args that
            # depend on it.
            work.time = bytereverse(bytereverse(work.time) + 1)
            state2 = partial(state, work.merkle_end, work.time, work.difficulty, f)
            calculateF(state, work.merkle_end, work.time, work.difficulty, f, state2)
            self.kernel.set_arg(8, state2[1])
            self.kernel.set_arg(9, state2[2])
            self.kernel.set_arg(10, state2[3])
            self.kernel.set_arg(11, state2[5])
            self.kernel.set_arg(12, state2[6])
            self.kernel.set_arg(13, state2[7])
            self.kernel.set_arg(15, f[0])
            self.kernel.set_arg(16, f[1])
            self.kernel.set_arg(17, f[2])
            self.kernel.set_arg(18, f[3])
            self.kernel.set_arg(19, f[4])
            last_n_time = now
            self.update_time_counter += 1
            # After max_update_time rolls, force a real work refresh.
            if self.update_time_counter >= self.switch.max_update_time:
                self.update = True
                self.update_time_counter = 1
"Use --vv to specify per-device vectors usage." ) group.add_option('-p', '--platform', dest='platform', default=-1, help='use platform by id', type='int') group.add_option('-w', '--worksize', dest='worksize', default=[], help='work group size, default is maximum returned by OpenCL') group.add_option('-f', '--frames', dest='frames', default=[], help='will try to bring single kernel execution to 1/frames seconds, default=30, increase this for less desktop lag') group.add_option('-s', '--sleep', dest='frameSleep', default=[], help='sleep per frame in seconds, default 0') group.add_option('--vv', dest='vectors', default=[], help='use vectors, default false') group.add_option('-v', '--vectors', dest='old_vectors',action='store_true', help='use vectors') parser.add_option_group(group) (options, options.servers) = parser.parse_args() log.verbose = options.verbose log.quiet = options.quiet options.rate = if_else(options.verbose, max(options.rate, 60), max(options.rate, 0.1)) options.version = VERSION options.max_update_time = 60 options.device = tokenize(options.device, 'device', []) options.cutoff_temp = tokenize(options.cutoff_temp, 'cutoff_temp', [95], float) options.cutoff_interval = tokenize(options.cutoff_interval, 'cutoff_interval', [0.01], float) switch = None try: switch = Switch(options) if not options.no_ocl:
"Use --vv to specify per-device vectors usage." ) group.add_option('-p', '--platform', dest='platform', default=-1, help='use platform by id', type='int') group.add_option('-w', '--worksize', dest='worksize', default=[], help='work group size, default is maximum returned by OpenCL') group.add_option('-f', '--frames', dest='frames', default=[], help='will try to bring single kernel execution to 1/frames seconds, default=30, increase this for less desktop lag') group.add_option('-s', '--sleep', dest='frameSleep', default=[], help='sleep per frame in seconds, default 0') group.add_option('--vv', dest='vectors', default=[], help='use vectors, default false') group.add_option('-v', '--vectors', dest='old_vectors',action='store_true', help='use vectors') parser.add_option_group(group) (options, options.servers) = parser.parse_args() log.verbose = options.verbose log.quiet = options.quiet options.rate = if_else(options.verbose, options.rate, max(options.rate, 0.1)) options.version = VERSION options.max_update_time = 60 options.device = tokenize(options.device, 'device', []) options.cutoff_temp = tokenize(options.cutoff_temp, 'cutoff_temp', [95], float) options.cutoff_interval = tokenize(options.cutoff_interval, 'cutoff_interval', [0.01], float) switch = None try: switch = Switch(options) if not options.no_ocl:
def report(self, nonce, accepted):
    """Forward the pool's verdict on a submitted nonce to the miner.

    Looks up the record stored at submit time, notifies the miner via
    share_found, then removes the record.
    """
    is_block, hash6, hash5 = self.sent[nonce]
    # Blocks are reported with the longer hash (hash6 + hash5).
    display_hash = hash6 + hash5 if is_block else hash6
    self.miner.share_found(display_hash, accepted, is_block)
    del self.sent[nonce]