Code Example #1
    def run(self):
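        # Manager proxies (dicts, queue, lists) shared with the A3C
        # worker processes below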
        m = Manager()
        global_ep, res_dict = m.dict(), m.dict()
        global_ep['g_ep'] = 0
        global_ep['g_sum_step'] = 0
        res_dict['res_q'] = []
        rew_queue = m.Queue()
        agent_input_dict_list = m.list([self.input_dict for _ in range(1)])
        agent_return_dict_list = m.list([m.dict() for _ in range(1)])

        a3c_workers = [
            A3CWorker(global_ep, res_dict, rew_queue, agent_input_dict_list[i],
                      agent_return_dict_list[i], i) for i in range(1)
        ]

        for w in a3c_workers:
            w.start()
        for w in a3c_workers:
            w.join()

        res = res_dict['res_q']
        print('game_result:', res)
        for agent_return_dict in agent_return_dict_list:
            print(agent_return_dict)

        win_rate, tie_rate, lose_rate, step_game = self.calculate_statistics(
            agent_return_dict_list)
        print(win_rate, tie_rate, lose_rate, step_game)
        self.return_dict[int(self.nth_pbt_process)] = [win_rate, step_game]
Code Example #2
# imports assumed by this snippet
import h5py
from multiprocessing import Manager
from tqdm import tqdm


def hdf5_to_dict(hdf5_file_path):
    data = {}
    manager = Manager()
    md_data = manager.dict()
    with h5py.File(hdf5_file_path, 'r') as read_file:
        for scene in tqdm(read_file.keys()):
            # one managed dict per scene so it can be shared across processes
            data[scene] = manager.dict()
            for pos in read_file[scene].keys():
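                # "[()]" loads the full HDF5 dataset into memory as a NumPy array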
                data[scene][pos] = read_file[scene][pos][()]
    md_data.update(data)
    return md_data
Code Example #3
    def colorize(request, context):
        try:
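            # run colorization in a child process and read its output
            # back through a managed dict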
            manager = Manager()
            return_dict = manager.dict()
            p = Process(target=mp_colorize, args=(request.img_input,
                                                  request.render_factor,
                                                  return_dict))
            p.start()
            p.join()

            response = return_dict.get("response", None)
            if not response or "error" in response:
                error_msg = response.get("error", None) if response else None
                log.error(error_msg)
                context.set_details(error_msg)
                context.set_code(grpc.StatusCode.INTERNAL)
                return Output()

            log.debug("colorize({})={}".format(request.img_input[:50], response["img_colorized"][:50]))
            return Output(img_colorized=response["img_colorized"])

        except Exception as e:
            traceback.print_exc()
            log.error(e)
            return Output()
Code Example #4
File: supervised.py  Project: michelgokan/AlphaTSP
def run(args):

    # setup
    N, D = args.N, args.D
    n_examples = args.n_train_examples
    n_threads = args.n_threads
    n_examples_per_thread = n_examples // n_threads

    # create policy network
    policy_network = alphatsp.util.get_policy_network(args.policy_network)

    # generate examples
    print("Generating examples and training...")

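    # managed queue and dict shared between the producer and trainer processes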
    manager = Manager()
    train_queue = manager.Queue()
    shared_dict = manager.dict()

    shared_dict["success"] = False

    producers = []
    for _ in range(n_threads):
        producers.append(
            Process(target=generate_examples,
                    args=(n_examples_per_thread, train_queue, args)))

    for p in producers:
        p.start()

    c = Process(target=train,
                args=(policy_network, train_queue, shared_dict, args))
    c.start()

    for p in producers:
        p.join()
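    # sentinel: tell the trainer that the producers are finished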
    train_queue.put(None)

    c.join()

    status = shared_dict["success"]
    if not status:
        print("Experiment failed.")
        return -1
Code Example #5
    def calculate_statistics(self, agent_return_dict_list):
        count_win = 0
        count_tie = 0
        count_lose = 0
        count_sum_game = 0
        count_sum_step = 0
        # Count wins, ties, and losses across all games, plus the total
        # number of steps, and return the per-game rates and average steps.
        for agent_return_dict in agent_return_dict_list:
            for idx in range(len(agent_return_dict['game_result'])):
                count_sum_game += 1
                count_sum_step += agent_return_dict['game_steps'][idx]
                if agent_return_dict['game_result'][idx] == "Win":
                    count_win += 1
                elif agent_return_dict['game_result'][idx] == "Tie":
                    count_tie += 1
                elif agent_return_dict['game_result'][idx] == "Lose":
                    count_lose += 1

        assert count_sum_game != 0, 'No Game Counted!!!'
        print("Count Sum games : ", count_sum_game)
        return (count_win / count_sum_game, count_tie / count_sum_game,
                count_lose / count_sum_game, count_sum_step / count_sum_game)


if __name__ == '__main__':
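    # "spawn" starts children in a fresh interpreter, which is safer
    # when CUDA or threads are involved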
    mp.set_start_method("spawn")
    manager = Manager()
    return_dict = manager.dict()
    input_dict = manager.dict()
    uniq_name = ""  # sys.argv[1]
    instance = TestFunc(uniq_name, 0, input_dict, return_dict)
    instance.run()
Code Example #6
def train(experiment: int, batch: int, resume: bool):
    cfg = OthelloConfig(experiment, batch)
    manager = Manager()
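    # a managed list backs the replay buffer so self-play workers can
    # append training data from separate processes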
    buffer = manager.list()
    replay_buffer = ReplayBuffer(buffer)
    shared_state_dicts = manager.dict()
    message_queue = Queue()
    # each log is a dict; the "gs" and "type" keys are required
    log_queue = Queue()
    writer = SummaryWriter(cfg.dir_log)
    if resume:
        print("Loading replay buffer to resume training...")
        with open(cfg.dir_replay_buffer, "rb") as f:
            buff_list = pickle.load(f)
        replay_buffer.save_training_data(buff_list)
        del buff_list
        print("Replay buffer loaded.")
    training_worker = TrainingWorker("Training Worker", message_queue,
                                     log_queue, shared_state_dicts,
                                     replay_buffer, cfg.device_name_tw, cfg,
                                     resume)
    evaluation_worker = EvaluationWorker("Evaluation Worker", message_queue,
                                         log_queue, shared_state_dicts,
                                         cfg.device_name_ew, cfg, resume)
    self_play_workers = []
    for i in range(cfg.num_self_play_workers):
        self_play_workers.append(
            SelfPlayWorker("Self-Play Worker-" + str(i), message_queue,
                           log_queue, shared_state_dicts, replay_buffer,
                           cfg.device_names_sp[i], cfg))
    print("Starting training...")
    training_worker.start()
    evaluation_worker.start()
    for worker in self_play_workers:
        worker.start()
    print("Training started.")
    try:
        while training_worker.is_alive():
            if log_queue.empty():
                time.sleep(1.0)
                continue
            log = log_queue.get()
            for k, v in log.items():
                if k in ["gs", "type"]:
                    continue
                if log["type"] == "scalar":
                    writer.add_scalar(k, v, log["gs"])
                else:
                    print("Unknown log type found:", log["type"])
            del log
    except KeyboardInterrupt:
        print("KeyboardInterrupt, stopping training...")
    finally:
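        # put more interrupt messages than workers so every worker is
        # guaranteed to receive one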
        for _ in range(cfg.num_self_play_workers * 5):
            message_queue.put(cfg.message_interrupt)
        training_worker.join()
        evaluation_worker.join()
        for worker in self_play_workers:
            worker.join()
        print("Saving replay buffer...")
        buff_list = list(buffer)
        with open(cfg.dir_replay_buffer, "wb") as f:
            pickle.dump(buff_list, f)
        del buff_list
        print("Replay buffer saved.")
Code Example #7
def main():
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    args = flag_parser.parse_arguments()

    create_shared_model = model_class(args.model)
    init_agent = agent_class(args.agent_type)

    args.episode_type = "TestValEpisode"
    args.test_or_val = "val"

    tb_log_dir = args.log_dir + "/" + '{}_{}_{}'.format(
        args.title, args.test_or_val,
        time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime(time.time())))
    log_writer = SummaryWriter(log_dir=tb_log_dir)

    print('Start Loading!')
    optimal_action_path = './data/AI2thor_Combine_Dataset/Optimal_Path_Combine.json'
    with open(optimal_action_path, 'r') as read_file:
        optimal_action_dict = json.load(read_file)
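    # mirror the loaded JSON into a managed dict so evaluation
    # processes share a single copy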
    manager = Manager()
    optimal_action = manager.dict()
    optimal_action.update(optimal_action_dict)
    glove_file_path = './data/AI2thor_Combine_Dataset/det_feature_512_eval.hdf5'
    glove_file = hdf5_to_dict(glove_file_path)
    print('Loading Success!')

    # Get all valid saved_models for the given title and sort by train_ep.
    checkpoints = [(f, f.split("_")) for f in os.listdir(args.save_model_dir)]
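    # checkpoint names appear to follow
    # "{title}_{n_frames}_{train_ep}_{Y-m-d}_{H:M:S}.dat", so the
    # "_"-split element at index -3 is the training episode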
    checkpoints = [(f, int(s[-3])) for (f, s) in checkpoints
                   if len(s) >= 4 and f.startswith(args.title)]
    checkpoints.sort(key=lambda x: x[1])

    best_model_on_val = None
    best_performance_on_val = 0.0
    for (ckpt_file, train_ep) in tqdm(checkpoints, desc="Checkpoints."):

        model = os.path.join(args.save_model_dir, ckpt_file)
        args.load_model = model

        # run eval on model
        args.test_or_val = "val"
        main_eval(args, create_shared_model, init_agent, glove_file,
                  optimal_action)

        # check if best on val.
        with open(args.results_json, "r") as f:
            results = json.load(f)

        if results["success"] > best_performance_on_val:
            best_model_on_val = model
            best_performance_on_val = results["success"]

        log_writer.add_scalar("val/success", results["success"], train_ep)
        log_writer.add_scalar("val/spl", results["spl"], train_ep)

        # run on test.
        args.test_or_val = "test"
        main_eval(args, create_shared_model, init_agent, glove_file,
                  optimal_action)
        with open(args.results_json, "r") as f:
            results = json.load(f)

        log_writer.add_scalar("test/success", results["success"], train_ep)
        log_writer.add_scalar("test/spl", results["spl"], train_ep)

    args.record_route = True
    args.test_or_val = "test"
    args.load_model = best_model_on_val
    main_eval(args, create_shared_model, init_agent, glove_file,
              optimal_action)

    with open(args.results_json, "r") as f:
        results = json.load(f)

    print(
        tabulate(
            [
                ["SPL >= 1:", results["GreaterThan/1/spl"]],
                ["Success >= 1:", results["GreaterThan/1/success"]],
                ["SPL >= 5:", results["GreaterThan/5/spl"]],
                ["Success >= 5:", results["GreaterThan/5/success"]],
            ],
            headers=["Metric", "Result"],
            tablefmt="orgtbl",
        ))

    print("Best model:", args.load_model)
Code Example #8
def main():
    setproctitle.setproctitle("Train/Test Manager")
    args = flag_parser.parse_arguments()

    if args.model in ("BaseModel", "GCN"):
        args.learned_loss = False
        args.num_steps = 50
        target = nonadaptivea3c_val if args.eval else nonadaptivea3c_train
    else:
        args.learned_loss = True
        args.num_steps = 6
        target = savn_val if args.eval else savn_train

    create_shared_model = model_class(args.model)
    init_agent = agent_class(args.agent_type)
    optimizer_type = optimizer_class(args.optimizer)

    if args.eval:
        main_eval(args, create_shared_model, init_agent)
        return

    start_time = time.time()
    local_start_time_str = time.strftime("%Y-%m-%d_%H:%M:%S",
                                         time.localtime(start_time))
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)

    if args.log_dir is not None:
        tb_log_dir = args.log_dir + "/" + args.title + "-" + local_start_time_str
        log_writer = SummaryWriter(log_dir=tb_log_dir)
    else:
        log_writer = SummaryWriter(comment=args.title)

    if args.gpu_ids == -1:
        args.gpu_ids = [-1]
    else:
        torch.cuda.manual_seed(args.seed)
        mp.set_start_method("spawn")

    shared_model = create_shared_model(args)

    train_total_ep = 0
    n_frames = 0

    if shared_model is not None:
        shared_model.share_memory()
        optimizer = optimizer_type(
            filter(lambda p: p.requires_grad, shared_model.parameters()), args)
        optimizer.share_memory()
        print(shared_model)
    else:
        assert (args.agent_type == "RandomNavigationAgent"
                ), "The model is None but agent is not random agent"
        optimizer = None

    processes = []

    print('Start Loading!')
    optimal_action_path = './data/AI2thor_Combine_Dataset/Optimal_Path_Combine.json'
    with open(optimal_action_path, 'r') as read_file:
        optimal_action_dict = json.load(read_file)
    manager = Manager()
    optimal_action = manager.dict()
    optimal_action.update(optimal_action_dict)
    glove_file_path = './data/AI2thor_Combine_Dataset/det_feature_512_train.hdf5'
    glove_file = hdf5_to_dict(glove_file_path)
    # det_gt_path = './data/AI2thor_Combine_Dataset/Instance_Detection_Combine.pkl'
    # with open(det_gt_path, 'rb') as read_file:
    #     det_gt = pickle.load(read_file)
    print('Loading Success!')

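    # shared boolean flag used to tell all worker processes to stop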
    end_flag = mp.Value(ctypes.c_bool, False)

    train_res_queue = mp.Queue()

    for rank in range(args.workers):
        p = mp.Process(
            target=target,
            args=(
                rank,
                args,
                create_shared_model,
                shared_model,
                init_agent,
                optimizer,
                train_res_queue,
                end_flag,
                glove_file,
                optimal_action,
                # det_gt,
            ),
        )
        p.start()
        processes.append(p)
        time.sleep(0.1)  # stagger worker start-up

    print("Train agents created.")

    train_thin = args.train_thin
    train_scalars = ScalarMeanTracker()

    # start_ep_time = time.time()

    try:
        while train_total_ep < args.max_ep:

            train_result = train_res_queue.get()
            train_scalars.add_scalars(train_result)
            train_total_ep += 1
            n_frames += train_result["ep_length"]
            # if train_total_ep % 10 == 0:
            #     print(n_frames / train_total_ep)
            #     print((time.time() - start_ep_time) / train_total_ep)
            if (train_total_ep % train_thin) == 0:
                log_writer.add_scalar("n_frames", n_frames, train_total_ep)
                tracked_means = train_scalars.pop_and_reset()
                for k in tracked_means:
                    log_writer.add_scalar(k + "/train", tracked_means[k],
                                          train_total_ep)

            if (train_total_ep % args.ep_save_freq) == 0:

                print(n_frames)
                if not os.path.exists(args.save_model_dir):
                    os.makedirs(args.save_model_dir)
                state_to_save = shared_model.state_dict()
                save_path = os.path.join(
                    args.save_model_dir,
                    "{0}_{1}_{2}_{3}.dat".format(args.title, n_frames,
                                                 train_total_ep,
                                                 local_start_time_str),
                )
                torch.save(state_to_save, save_path)

    finally:
        log_writer.close()
        end_flag.value = True
        for p in processes:
            time.sleep(0.1)
            p.join()
Code Example #9
    def recognize_scene(self, request, context):
        """Wraps the scene recognition model to make sure inputs and outputs match the service requirements."""

        # Store the names of the images to delete them afterwards
        created_images = []

        # Python command call arguments. Key = argument name, value = tuple(type, required?, default_value)
        arguments = {
            "input_image": ("image", True, None),
            "predict": ("string", True, self.prediction_list)
        }

        # Treat inputs
        try:
            image_path, predict, file_index_str = self.treat_inputs(
                request, arguments, created_images)
        except HTTPError as e:
            error_message = "Error downloading the input image \n" + str(e)
            log.error(error_message)
            self.result.data = error_message
            context.set_details(error_message)
            context.set_code(grpc.StatusCode.INTERNAL)
            return self.result
        except Exception as e:
            log.error(e)
            self.result.data = str(e)
            context.set_details(str(e))
            context.set_code(grpc.StatusCode.INTERNAL)
            return self.result

        # Get cam (color activation mappings) file path
        input_filename = os.path.split(created_images[0])[1]
        log.debug("Input file name: {}".format(input_filename))
        output_image_path = self.output_dir + '/' + input_filename
        log.debug("Output image path (cam_path): {}".format(output_image_path))
        created_images.append(output_image_path)

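        # run recognition in a child process; the result comes back
        # through a managed dict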
        manager = Manager()
        return_dict = manager.dict()
        p = Process(target=mp_recognize,
                    args=(image_path, predict, output_image_path, return_dict))
        p.start()
        p.join()

        response = return_dict.get("response", None)
        if not response or "error" in response:
            error_msg = response.get("error", None) if response else None
            log.error(error_msg)
            context.set_details(error_msg)
            context.set_code(grpc.StatusCode.INTERNAL)
            return SceneRecognitionResult()

        # Prepare gRPC output message
        self.result = SceneRecognitionResult()
        log.debug("Got result.")
        self.result.data = json.dumps(response)
        log.debug("Output generated. Service successfully completed.")

        for image in created_images:
            service.serviceUtils.clear_file(image)

        return self.result
Code Example #10
    # Get save directory
    save_dir = os.path.join(os.getcwd(), args.save_dir, exp_name)
    if not os.path.exists(save_dir):
        os.makedirs(os.path.join(save_dir, 'configs'))
    print('Save directory:\t {}'.format(save_dir))

    # Get device ID
    if torch.cuda.is_available():
        device = torch.cuda.device_count()  # number of visible GPUs
    else:
        device = 'cpu'
    print('Device:\t {}'.format(device))

    # init master: a managed dict holding shared experiment state
    master = manager.dict()
    master['start_time'] = strftime("%Y-%m-%dT%H-%M-%S", gmtime())
    master['experiment_name'] = exp_name
    master['device'] = device
    master['summaries'] = {}

    # Run a single experiment
    if args.index >= 0:
        if args.index < len(config_files):
            config_files = [config_files[args.index]]
        else:
            print(
                "WARNING: Index out of range, will run all experiments in folder."
            )

    if device == 'cpu':