def __init__(self, network_w=None, network_b=None, white_name='net',
             black_name='net', random_switch=True, recoard_game=True,
             recoard_dir='data/distributed/', play_times=np.inf,
             distributed_server=None, distributed_dir='data/download_weight',
             **xargs):
    """Initialize a self-play game runner.

    Args:
        network_w: network driving the white player (may be None).
        network_b: network driving the black player (may be None).
        white_name: display/record name for the white player.
        black_name: display/record name for the black player.
        random_switch: if True, sides are randomly swapped between games
            (exact use is outside this view — confirm in the play loop).
        recoard_game: whether to record games (spelling kept — it is part
            of the public keyword interface).
        recoard_dir: directory where game records are written.
        play_times: number of games to play; defaults to unbounded (np.inf).
        distributed_server: weight-distribution server location, or None.
        distributed_dir: local directory for downloaded weights.
        **xargs: extra keyword options stored verbatim for later use.
    """
    # Player networks and their record names.
    self.network_w = network_w
    self.network_b = network_b
    self.white_name = white_name
    self.black_name = black_name

    # Play-loop configuration.
    self.random_switch = random_switch
    self.play_times = play_times

    # Game-recording configuration.
    self.recoard_game = recoard_game
    self.recoard_dir = recoard_dir

    # Pass-through options kept for downstream consumers.
    self.xargs = xargs

    # Distributed-weights configuration; the maintainer syncs weights
    # from `distributed_server` into `distributed_dir`.
    self.distributed_server = distributed_server
    self.distributed_dir = distributed_dir
    self.nm = net_maintainer.NetMatainer(
        server=distributed_server, netdir=distributed_dir)
help="distributed server location", default=None)  # continuation of a parser.add_argument(...) call that starts before this chunk
parser.add_argument('--download', '-d', type=str,
                    help="download location", default='data/download_weight')
args = parser.parse_args()

# Resolve CLI options into locals.
gpu_num = int(args.gpu)
server = args.server
netdir = args.download

# Pin this process to the requested GPU before any framework initializes.
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_num)

#(sess,graph),((X,training),(net_softmax,value_head)) = resnet.get_model('models/5_7_resnet_joint-two_stage/model_57',labels,GPU_CORE=[gpu_num])

# Fetch the latest distributed weights, then build the model from them.
nm = net_maintainer.NetMatainer(server=server, netdir=netdir)
latest_model_name = nm.get_update()
(sess, graph), ((X, training), (net_softmax, value_head)) = resnet.get_model(
    os.path.join(netdir, latest_model_name), labels, GPU_CORE=[gpu_num],
    FILTERS=128, NUM_RES_LAYERS=7)
# Mark the downloaded weight file as the one currently in use.
nm.updated(latest_model_name)

# Bounded queue of inference requests; capacity 400 items.
queue = Queue(400)


async def push_queue(features, loop):
    # Enqueue one inference request; the future is resolved elsewhere.
    # NOTE(review): function body continues beyond this chunk.
    future = loop.create_future()
    item = QueueItem(features, future)