def load(self, file=None):
    """Load queue entries from an INI-style queue file into this queue.

    file -- path to read; defaults to self.queue_file.  (The parameter
    name shadows the builtin `file`; kept for caller compatibility.)
    """
    # Whether stored entry ids should be discarded and regenerated on load.
    reassign_id = policy.get_policy().get(policy.REASSIGN_ID)
    file = file or self.queue_file
    cfg = ConfigParser()
    cfg.read(file)
    self.log.debug("read %s\n" % file)
    sections = cfg.sections()
    if reassign_id:
        # Sort sections by their stored numeric "id" so freshly generated
        # ids preserve the original ordering (Python 2 cmp-style sort).
        def _cmp(sa, sb):
            ia, ib = int(cfg.get(sa, "id")), int(cfg.get(sb, "id"))
            return cmp(ia, ib)
        sections.sort(_cmp)
    for section in sections:
        self.log.debug("added %s\n" % section)
        try:
            # Create the entry without consuming a new id; its saved state
            # is restored from the config section right after.
            j = QueueEntry(cfg.get(section, "torrent"), id_generator=None)
            j.load(cfg)
            if reassign_id:
                # NOTE(review): uses the module-level id_generator, not a
                # per-entry one — presumably intentional; verify.
                j.id = id_generator.generate()
            self.add(j, save=False)
        except NoOptionError:
            # Malformed section (e.g. missing "torrent"): skip it.
            pass
        except IOError:
            # Unreadable torrent file: skip the entry.
            pass
    self.q.sort()
def init(self):
    """Build this gatherer's environment, session and PPO agent graph.

    Creates the policy/agent under a per-worker variable scope, plus
    non-trainable accumulator variables and a grouped assign op that
    copies the accumulators into the trainable variables (used to push
    weights received from the master into the local agent).
    """
    import tensorflow as tf
    self.env = self.env_producer.get_new_environment()
    self.s0 = self.env.reset()
    self.session = utils.create_session(self.env_opts, False)
    # Gather workers are pinned to CPU.
    with tf.device("/cpu:0"):
        with tf.variable_scope("gather-%s" % self.idx):
            pol = get_policy(self.env_opts, self.session)
            self.agent = PPOAgent(pol, self.session, "gather-%s" % self.idx,
                                  self.env_opts)
        self.trainable_vars = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, "gather-%s" % self.idx)
        # One zero-initialized, non-trainable shadow variable per
        # trainable variable; incoming weights are staged here first.
        self.accum_vars = [
            tf.Variable(tf.zeros_like(tv.initialized_value()),
                        trainable=False)
            for tv in self.trainable_vars
        ]
        assign_ops = [
            self.trainable_vars[i].assign(self.accum_vars[i])
            for i in range(len(self.trainable_vars))
        ]
        # Single op that copies every accumulator into its live variable.
        self.assign_op = tf.group(assign_ops)
        self.session.run(tf.global_variables_initializer())
        self.cur_hidden_state = self.agent.get_init_hidden_state()
        # Episode buffers: seven parallel lists seeded with the initial
        # state and hidden state (the remaining lists' meaning is defined
        # where the episode is consumed — not visible in this chunk).
        self.episode = [self.s0], [], [], [], [], [self.cur_hidden_state], []
def main():
    """Entry point: initialize the bot, then poll the policy for actions
    and execute them forever."""
    init()
    save_crop()
    logging.info("start at: {}".format(datetime.datetime.now()))
    play = get_policy()
    while True:
        name = play.action()
        if not name:
            continue
        get_action_by_name(name).execute()
def __init__(self, RequestHandlerClass, queue):
    """Threaded web-service TCP server bound to the policy-configured address.

    RequestHandlerClass -- SocketServer handler class for incoming requests.
    queue -- work queue shared with the rest of the application.

    Raises BindException if the socket cannot be bound.
    """
    Thread.__init__(self)
    self.queue = queue
    self.policy = policy.get_policy()
    # Cooperative shutdown flag polled elsewhere (0 = keep running).
    self._quit = 0
    # Listen address comes from the global policy.
    addr = self.policy(policy.WEBSERVICE_IP), \
           self.policy(policy.WEBSERVICE_PORT)
    try:
        SocketServer.ThreadingTCPServer.__init__(self, addr, RequestHandlerClass)
    except Exception, why:  # Python 2 syntax; wrap any bind failure
        raise BindException, '%s:%d: %s' % (addr + (str(why),))
def __init__(self, controller, dispatch, error):
    """Worker thread driving the download queue.

    controller -- owning controller object.
    dispatch -- callable used to dispatch work.
    error -- callable used to report errors.
    """
    Thread.__init__(self)
    # Collaborators handed in by the caller.
    self.controller = controller
    self.do_dispatch = dispatch
    self.error = error
    # Global policy and logging.
    self.policy = policy.get_policy()
    self.log = get_logger()
    # Persistent queue, restored from disk immediately.
    self.queue = Queue()
    self.queue.load()
    # Synchronization primitives.
    self.lock = Lock()
    self._quit = Event()
    self.add_queue = SyncQueue(0)
    # Count of currently running entries.
    self.num_run = 0
def __init__(self, updated=None):
    """Alias registry: merge alias files from the share/program directories,
    then the user's own file, and persist the result.

    updated -- optional callback/flag stored for later use by the caller.
    """
    self.updated = updated
    self.share_path = os.path.join(sys.prefix, 'share', 'BTQueue')
    self.program_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    pol = policy.get_policy()
    # Writable alias-file path resolved through the policy.
    self.file = pol.get_path(ALIAS_FILE, writable=1)
    # Fix: the original computed a platform-dependent `default_path`
    # (program dir on win32 / missing $HOME, else $HOME) but never used
    # it; side-effect-free dead code removed.
    self.aliases = ConfigParser()
    # Merge system-wide aliases first, then program-local ones.
    for prefix in [self.share_path, self.program_path]:
        self.load(os.path.join(prefix, ALIAS_FILE))
    # Finally load the user's own alias file and persist the merged view.
    self.load()
    self.save()
def __init__(self, output, args=[], banfunc=None):
    """Download launcher thread: parse config, build the raw server and
    bind the listen port (retrying once without UPnP on failure).

    output -- object with a .message() method for user-visible warnings.
    args -- command-line style argument list parsed via parseargs.
    banfunc -- optional callback for banning peers.

    NOTE(review): mutable default `args=[]` — harmless here because args
    is only read, but fragile if a later change mutates it.
    """
    Thread.__init__(self)
    self.output = output
    config, args = parseargs(args, defaults, 0, 0)
    self.config = config
    self.banfunc = banfunc
    self.policy = policy.get_policy()
    #self.torrent_dir = config['torrent_dir']
    #self.torrent_cache = {}
    #self.file_cache = {}
    #self.blocked_files = {}
    #self.scan_period = config['parse_dir_interval']
    self.stats_period = config['display_interval']
    self.torrent_list = []
    self.downloads = {}
    self.counter = 0
    self.doneflag = Event()
    # Pending / in-progress hash checks.
    self.hashcheck_queue = []
    self.hashcheck_current = None
    # Event loop handling all network traffic.
    self.rawserver = RawServer(self.doneflag,
                               config['timeout_check_interval'],
                               config['timeout'],
                               ipv6_enable = config['ipv6_enabled'],
                               failfunc = self.failed,
                               errorfunc = self.exchandler)
    self.upnp = UPnP_test(self.policy(policy.UPNP_NAT_ACCESS))
    while True:
        try:
            if self.policy(policy.USE_SINGLE_PORT):
                self.listen_port = self.rawserver.find_and_bind(
                    config['minport'], config['maxport'], config['bind'],
                    ipv6_socket_style = config['ipv6_binds_v4'],
                    upnp = self.upnp,
                    randomizer = self.policy(policy.RANDOM_PORT))
            else:
                self.listen_port = None
            break
        except socketerror, e:  # Python 2 syntax
            # On a UPnP forwarding failure, retry once with UPnP disabled
            # before giving up entirely.
            if self.upnp and e == UPnP_ERROR:
                self.output.message('WARNING: COULD NOT FORWARD VIA UPnP')
                self.upnp = 0
                continue
            self.failed("Couldn't listen - " + str(e))
            return
def __init__(self, file, priority=-1, dest_path="", id_generator=id_generator):
    """One queued torrent download and its runtime bookkeeping.

    file -- path to the .torrent file (shadows the builtin `file`; kept
        for caller compatibility).
    priority -- explicit priority, or any negative value to take the
        policy default.
    dest_path -- download destination ("" = default).
    id_generator -- generator used to allocate this entry's id; the
        default binds the module-level generator.  Passing None yields
        id 0 (used when restoring entries whose id is reassigned later).
    """
    if id_generator:
        self.id = id_generator.generate()
    else:
        self.id = 0
    self.file = file
    self.global_policy = policy.get_policy()
    # Negative priority means "use the configured default".
    if priority >= 0:
        self.priority = priority
    else:
        self.priority = self.global_policy(policy.DEFAULT_PRIORITY)
    # Per-entry overrides layered on top of the global policy.
    self.local_policy = policy.EntryPolicy()
    self.state = STATE_WAITING
    self.done_flag = Event()
    self.dest_path = dest_path
    # Transfer counters: current and previous sample (for rate deltas).
    self.dlsize = self.ulsize = self.old_dlsize = self.old_ulsize = 0
    self.error = ""
    self.gather_info()
    self.statistics = None
    self.spew = None
    self.dow = self.listen_port = None
    self.share_ratio = 0.0
    self.currentseed = "?"
    self.currentpeer = "?"
    self.recheck = 0
    # Lifecycle timestamps; -1 = event has not happened yet.
    self.added_time = -1
    self.started_time = -1
    self.finished_time = -1
    self.stopped_time = -1
    self.added()
    # Seed the statistics/UI with zeroed values.
    self.update_info(
        fractionDone=0.0,
        timeEst=0.0,
        downRate=0.0,
        upRate=0.0,
        activity="None",
        statistics=None,
        spew=None,
        sizeDone=0,
        force=True,
    )
def init_agent(self):
    """Build this worker's session and PPO agent, plus shadow variables.

    Creates non-trainable accumulator copies of the trainable variables
    and of both optimizers' slot variables, and one grouped assign op
    that loads all of them into the live variables (used to sync weights
    and optimizer state from the master).  Ends by entering run().
    """
    import tensorflow as tf
    env_opts = environments.get_env_options(
        self.env_name, self.env_producer.get_use_gpu())
    self.session = utils.create_session(env_opts, True)
    with tf.variable_scope("worker-%s" % self.idx):
        pol = get_policy(env_opts, self.session)
        self.agent = PPOAgent(pol, self.session, "worker-%s" % self.idx,
                              env_opts)
    self.trainable_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, "worker-%s" % self.idx)
    # Shadow (non-trainable) variables that receive broadcast weights.
    self.accum_vars = [
        tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
        for tv in self.trainable_vars
    ]
    p_vars = self.agent.p_opt.variables()
    v_vars = self.agent.v_opt.variables()
    # Shadow copies of the policy/value optimizer slot variables too.
    self.p_opt_vars = [
        tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
        for tv in p_vars
    ]
    self.v_opt_vars = [
        tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
        for tv in v_vars
    ]
    p_assign_ops = [
        p_vars[i].assign(self.p_opt_vars[i]) for i in range(len(p_vars))
    ]
    v_assign_ops = [
        v_vars[i].assign(self.v_opt_vars[i]) for i in range(len(v_vars))
    ]
    assign_ops = [
        self.trainable_vars[i].assign(self.accum_vars[i])
        for i in range(len(self.trainable_vars))
    ]
    # One op that restores weights and optimizer state together.
    self.assign_op = tf.group(assign_ops + p_assign_ops + v_assign_ops)
    self.session.run(tf.global_variables_initializer())
    self.run()
def __init__(self):
    """Resolve the web-service listen address from the global policy."""
    pol = policy.get_policy()
    self.policy = pol
    # (ip, port) tuple the web service should bind to.
    self.addr = (pol(policy.WEBSERVICE_IP), pol(policy.WEBSERVICE_PORT))
def __init__(self):
    """Empty queue; the backing file location is policy-resolved."""
    self.q = []
    # Path of the persisted queue file.
    self.queue_file = policy.get_policy().get_path(policy.QUEUE_FILE)
    self.log = get_logger()
def __init__(self):
    """History store backed by the policy-resolved history file."""
    # Entries added this session vs. everything known.
    self.new_history = []
    self.history = []
    self.history_file = policy.get_policy().get_path(policy.HISTORY_FILE)
def __init__(self, name, cord):
    """Action that continues the current match.

    Caches the global policy object for use when the action executes.
    """
    super(ContinueMatchAction, self).__init__(name, cord)
    self.policy = get_policy()
def start(self):
    """Master training loop.

    Builds the agent graph plus shadow variables, tries to restore the
    latest checkpoint, then forever broadcasts weights to workers,
    merges their results, and checkpoints every 10th iteration.
    """
    import tensorflow as tf
    self.summary_writer = tf.summary.FileWriter(
        "logs/%s" % self.env_opts["env_name"])
    self.session = utils.create_session(self.env_opts, True)
    with tf.variable_scope("master-0"):
        pol = get_policy(self.env_opts, self.session)
        self.agent = PPOAgent(pol, self.session, "master-0", self.env_opts)
    self.trainable_vars = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, "master-0")
    # Non-trainable shadow variables that receive merged worker weights
    # before they are assigned into the live variables.
    self.accum_vars = [
        tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
        for tv in self.trainable_vars
    ]
    p_vars = self.agent.p_opt.variables()
    v_vars = self.agent.v_opt.variables()
    # Shadow copies of both optimizers' slot variables.
    self.p_opt_vars = [
        tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
        for tv in p_vars
    ]
    self.v_opt_vars = [
        tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False)
        for tv in v_vars
    ]
    p_assign_ops = [
        p_vars[i].assign(self.p_opt_vars[i]) for i in range(len(p_vars))
    ]
    v_assign_ops = [
        v_vars[i].assign(self.v_opt_vars[i]) for i in range(len(v_vars))
    ]
    assign_ops = [
        self.trainable_vars[i].assign(self.accum_vars[i])
        for i in range(len(self.trainable_vars))
    ]
    # One op restoring weights and optimizer state together.
    self.assign_op = tf.group(assign_ops + p_assign_ops + v_assign_ops)
    self.restore_variables()
    self.saver = tf.train.Saver(max_to_keep=1)
    self.session.run(tf.global_variables_initializer())
    ckpt_dir = "models/%s/" % self.env_opts["env_name"]
    try:
        # NOTE(review): this deliberately replaces the Saver created
        # above with the one rebuilt from the checkpoint's meta graph.
        self.saver = tf.train.import_meta_graph(
            tf.train.latest_checkpoint(ckpt_dir) + ".meta")
        self.saver.restore(self.session,
                           tf.train.latest_checkpoint(ckpt_dir))
    except Exception as e:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and hid the cause; narrowed and reported.
        print("failed to restore model: %r" % (e,))
    while True:
        if self.iter_count % 10 == 0:
            print("Saving model...")
            self.save_variables()
            self.saver.save(self.session, self.model_path, self.iter_count)
            print("Model saved")
        self.broadcast_weights()
        self.merge_weights()
        self.iter_count += 1
from sha import sha #from string import substring import re import binascii import sys,os import urlparse,urllib from BitTornado.bencode import * from BitTornado.zurllib import urlopen as zurlopen from BitCrawler.aurllib import urlopen as aurlopen import timeoutsocket import policy timeout = policy.get_policy().get(policy.DEFAULT_SOCKET_TIMEOUT) timeoutsocket.setDefaultSocketTimeout(timeout) del timeout del policy def announce_to_scrape(url): items = list(urlparse.urlparse(url)) path = items[2] return urlparse.urljoin(url, os.path.basename(path).replace('announce','scrape')) def get_scrape_by_announce_url(announce_url,infohash): seeder,peerer = '?','?' if not announce_url: return seeder,peerer scrape_url = announce_to_scrape(announce_url) len_hash = len(infohash)
def main():
    """Dispatch to the A2C, PPO, or UNREAL runner based on FLAGS.agent_mode.

    Builds per-(mode, policy, map) model/log/replay paths, assembles the
    environment/agent/trainer argument dicts and starts the runner loop.
    """
    model_path = os.path.join(FLAGS.model_path, FLAGS.agent_mode,
                              FLAGS.policy, FLAGS.map_name)
    full_log_path = os.path.join(FLAGS.log_path, FLAGS.agent_mode,
                                 FLAGS.policy, FLAGS.map_name)
    replay_path = os.path.join(FLAGS.replay_dir, FLAGS.agent_mode,
                               FLAGS.policy, FLAGS.map_name)
    if not os.path.exists(replay_path):
        os.makedirs(replay_path)
    if FLAGS.agent_mode == Mode.A2C:
        from runner.a2c_runner import Runner
        env_args = dict(map_name=FLAGS.map_name,
                        step_mul=FLAGS.step_mul,
                        game_steps_per_episode=0,
                        screen_size_px=(FLAGS.resolution, ) * 2,
                        minimap_size_px=(FLAGS.resolution, ) * 2,
                        visualize=FLAGS.visualize,
                        save_replay_episodes=FLAGS.save_replay_episodes,
                        replay_dir=replay_path)
        agent_args = dict(spatial_dim=FLAGS.resolution,
                          unit_type_emb_dim=FLAGS.unit_type_emb_dim,
                          loss_value_weight=FLAGS.loss_value_weight,
                          entropy_weight_action_id=FLAGS.entropy_weight_action,
                          entropy_weight_spatial=FLAGS.entropy_weight_spatial,
                          policy=get_policy(FLAGS.policy),
                          summary_path=full_log_path)
        # A2C defaults to 16-step rollouts unless overridden.
        if FLAGS.local_t_max is None:
            n_steps_per_batch = 16
        else:
            n_steps_per_batch = FLAGS.local_t_max
        trainer_args = dict(gamma=FLAGS.gamma, n_steps=n_steps_per_batch)
        runner = Runner(env_args, agent_args, trainer_args, model_path)
        runner.run()
    elif FLAGS.agent_mode == Mode.PPO:
        from runner.ppo_runner import Runner
        env_args = dict(map_name=FLAGS.map_name,
                        step_mul=FLAGS.step_mul,
                        game_steps_per_episode=0,
                        screen_size_px=(FLAGS.resolution, ) * 2,
                        minimap_size_px=(FLAGS.resolution, ) * 2,
                        visualize=FLAGS.visualize,
                        save_replay_episodes=FLAGS.save_replay_episodes,
                        replay_dir=replay_path)
        agent_args = dict(spatial_dim=FLAGS.resolution,
                          unit_type_emb_dim=FLAGS.unit_type_emb_dim,
                          loss_value_weight=FLAGS.loss_value_weight,
                          entropy_weight_action_id=FLAGS.entropy_weight_action,
                          entropy_weight_spatial=FLAGS.entropy_weight_spatial,
                          policy=get_policy(FLAGS.policy),
                          summary_path=full_log_path)
        # PPO defaults to 128-step rollouts unless overridden.
        if FLAGS.local_t_max is None:
            n_steps_per_batch = 128
        else:
            n_steps_per_batch = FLAGS.local_t_max
        trainer_args = dict(gamma=FLAGS.gamma,
                            n_steps=n_steps_per_batch,
                            lambda_par=FLAGS.ppo_lambda,
                            batch_size=FLAGS.ppo_batch_size or n_steps_per_batch,
                            n_epochs=FLAGS.ppo_epochs)
        runner = Runner(env_args, agent_args, trainer_args, model_path)
        runner.run()
    elif FLAGS.agent_mode == Mode.UNREAL:
        from runner.unreal_runner import Runner
        # UNREAL paths are keyed by (mode, map) only — no policy component.
        model_path = os.path.join(FLAGS.model_path, FLAGS.agent_mode,
                                  FLAGS.map_name)
        full_log_path = os.path.join(FLAGS.log_path, FLAGS.agent_mode,
                                     FLAGS.map_name)
        replay_path = os.path.join(FLAGS.replay_dir, FLAGS.agent_mode,
                                   FLAGS.map_name)
        env_args = dict(map_name=FLAGS.map_name,
                        step_mul=FLAGS.step_mul,
                        game_steps_per_episode=0,
                        screen_size_px=(FLAGS.resolution, ) * 2,
                        minimap_size_px=(FLAGS.resolution, ) * 2,
                        visualize=FLAGS.visualize,
                        save_replay_episodes=FLAGS.save_replay_episodes,
                        replay_dir=replay_path)
        runner = Runner(env_args, full_log_path, model_path)
        runner.run()
def _decode_peer_id(id):
    """Best-effort decode of a BitTorrent peer id into a client name string.

    Returns a short client/version string (e.g. "Az 2.0.3.2", "BTQ 0.0.7"),
    "N/A" for an anonymised "[]" id, or falls through (implicitly None)
    for unrecognised ids, optionally logging them per policy.
    (The parameter shadows the builtin `id`; kept for compatibility.)
    """
    quoted_id = id
    # Normalise hex-escaped or quoted representations first.
    if id[:3] == "0x ":
        id = a2b_hex(id[3:])
    if id[0] == '"' and id[-1] == '"':
        id = id[1:-1]
    if len(id) == 40:
        id = a2b_hex(id)
    # Shad0w-style ids: "S" + three version characters.
    if id[0] == "S":
        if id[6 : 6 + 3] == "---":
            ver = list(id[1 : 1 + 3])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            name = "S " + ".".join(ver)
            return name
        if ord(id[8]) == 0:
            ver = list(id[1 : 1 + 3])
            for i in range(3):
                ver[i] = str(ord(ver[i]))
            name = "S " + ".".join(ver)
            return name
        if id[4 : 4 + 7] == "Plus---":
            ver = list(id[1 : 1 + 3])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            name = "S " + ".".join(ver) + " Plus"
            return name
    # Mainline BitTorrent.
    if id[0] == "T":
        if id[6 : 6 + 3] == "---":
            ver = list(id[1:4])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            name = "BT " + ".".join(ver)
            return name
        if id[6] in mapbase64 and id[7] in mapbase64 and id[8] in mapbase64:
            ver = list(id[1:4])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            name = "BT " + ".".join(ver) + " M"
            return name
    # ABC.
    if id[0] == "A":
        if id[6 : 6 + 3] == "---":
            ver = list(id[1:4])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            name = "ABC " + ".".join(ver)
            return name
        if id[6] in mapbase64 and id[7] in mapbase64 and id[8] in mapbase64:
            ver = list(id[1:4])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            name = "ABC " + ".".join(ver) + " M"
            return name
    # BTQueue itself.
    if id[0] == "Q":
        if id[6 : 6 + 3] == "---" or (id[6] in mapbase64 and id[7] in mapbase64 and id[8] in mapbase64):
            ver = list(id[1:4])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            ver_str = ".".join(ver)
            name = "BTQ " + ver_str
            # Build numbers are only encoded from version 0.0.7 onwards.
            if cmp_version(ver_str, "0.0.7") >= 0:
                build = decode_build(id[9 : 9 + 3])
                if build:
                    name += " " + str(build)
            return name
        elif id[1] in mapbase64[:16] and id[2] in mapbase64[:16] and id[3] in mapbase64[:16]:
            ver = list(id[1:4])
            for i in range(3):
                ver[i] = str(int(ver[i], 16))
            ver_str = ".".join(ver)
            name = "BTQ " + ver_str
            return name
    # Azureus ("-AZxxxx-" style, plus an old fixed id).
    if id[1 : 1 + 2] == "AZ":
        name = "Az " + ".".join(list(id[3 : 3 + 4]))
        return name
    if id[5 : 5 + 7] == "Azureus":
        name = "Az 2.0.3.2"
        return name
    # BitSpirit.
    if id[2 : 2 + 2] == "BS":
        # NOTE(review): if ord(id[1]) is neither 0 nor 2, `name` is unbound
        # here and this raises NameError; preserved as-is.
        if ord(id[1]) == 0:
            name = "BS v1"
        if ord(id[1]) == 2:
            name = "BS v2"
        return name
    if id[0] == "U":
        if id[8] == "-":
            name = "UPnP " + ".".join(list(id[1 : 1 + 3]))
            return name
    # MLdonkey.
    if id[0] == "M" and id[2] == "-" and id[4] == "-" and id[6 : 6 + 2] == "--":
        name = "ML %s.%s.%s" % (id[1], id[3], id[5])
        return name
    # BitComet ("exbc" prefix with binary version bytes).
    if id[:4] == "exbc":
        name = "BC "
        # NOTE(review): "%c" applied to str(...) values looks suspicious
        # (probably meant "%s"); preserved as-is.
        name += "%c.%c%c" % (str(ord(id[4])), str(ord(id[5]) / 10), str(ord(id[5]) % 10))
        return name
    if id[:7] == "turbobt":
        name = "TBT " + id[7 : 7 + 5]
        return name
    # NOTE(review): "-G3g3rmz " is only 9 chars but is compared against
    # id[:12]; preserved as-is.
    if id[:12] == "-G3g3rmz ":
        name = "G3"
        return name
    if id[:3] == "-G3":
        name = "G3"
        return name
    if id[:7] == "Plus---" or (id[:4] == "Plus" and id[7] == "-"):
        name = "BT Plus"
        return name
    if id[:16] == "Deadman Walking-" or id[:6] == "BTDWV-":
        name = "Deadman"
        return name
    if id[1 : 1 + 2] == "LT":
        name = "libt " + ".".join(list(id[3 : 3 + 4]))
        return name
    if id[1 : 1 + 2] == "TS":
        name = "TS " + ".".join(list(id[3 : 3 + 4]))
        return name
    if id[1 : 1 + 2] == "MT":
        name = "MT " + ".".join(list(id[3 : 3 + 4]))
        return name
    if id[:12] == "\000\000\000\000\000\000\000\000\000\003\003\003":
        name = "Snark"
        return name
    if id[:5] == "btuga":
        name = "BTugaXP"
        return name
    if id[4 : 4 + 6] == "btfans":
        name = "SBT"
        return name
    if id[:10] == "DansClient":
        name = "XT"
        return name
    if id[:14] == "\000\000\000\000\000\000\000\000\000\000\000\000aa":
        name = "Exp 3.2.1b2"
        return name
    if id[:14] == "\000\000\000\000\000\000\000\000\000\000\000\000\000\000":
        name = "Exp 3.1"
        return name
    if id[:12] == "\000\000\000\000\000\000\000\000\000\000\000\000":
        name = "Generic"
        return name
    if id[:2] == "[]":
        return "N/A"
    # Unknown client: append it to the unknown-id log if policy asks for it.
    pol = policy.get_policy()
    if pol(policy.LOG_UNKNOWN_ID):
        unknown_log = pol.get_path(policy.UNKNOWN_ID_FILE)
        try:
            fd = open(unknown_log, "a")
            fd.write(id + " " + quoted_id + "\n")
            fd.close()
        except Exception, why:  # best-effort logging; ignore write failures
            pass
def __init__(self, progname, debug_level=WARN):
    """Logger writing to a policy-resolved '<progname>.log' file.

    NOTE(review): the debug_level argument is currently ignored — the
    effective level always comes from policy.DEBUG_LEVEL.
    """
    cfg = policy.get_policy()
    self.file = cfg.get_path(progname + '.log')
    self.debug_level = cfg(policy.DEBUG_LEVEL)