def main():
    print_config()

    # The image list file holds paths relative to its own directory; build absolute paths.
    relative_paths = np.loadtxt(CONFIG['imagefile_path'], dtype=str)
    images_base_path = os.path.dirname(CONFIG['imagefile_path'])
    absolute_paths = [images_base_path + i.strip('.') for i in relative_paths]
    bboxes = np.load(CONFIG['boxfile_path'], allow_pickle=True)

    env = TextLocEnv(absolute_paths, bboxes, -1)  # -1 = run on CPU

    # Run the model once on an initial observation and dump the resulting
    # computational graph in Graphviz dot format (c is expected to be
    # chainer.computational_graph).
    m = CustomModel(10)
    vs = [m(env.reset())]
    g = c.build_computational_graph(vs)
    with open('graph.dot', 'w') as o:
        o.write(g.dump())
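
The dumped graph.dot can then be rendered to an image; a minimal sketch, assuming the Graphviz dot binary is installed and on PATH:

import subprocess

# Render the dumped computational graph to a PNG file.
subprocess.run(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png'], check=True)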
def main():
    print_config()

    relative_paths = np.loadtxt(CONFIG['imagefile_path'], dtype=str)
    images_base_path = os.path.dirname(CONFIG['imagefile_path'])
    absolute_paths = [images_base_path + i.strip('.') for i in relative_paths]
    bboxes = np.load(CONFIG['boxfile_path'], allow_pickle=True)

    env = TextLocEnv(absolute_paths, bboxes, -1)  # -1 = run on CPU
    q_func = chainerrl.q_functions.SingleModelStateQFunctionWithDiscreteAction(
        CustomModel(9))
    optimizer = chainer.optimizers.Adam(eps=1e-2)
    optimizer.setup(q_func)
    replay_buffer = chainerrl.replay_buffer.ReplayBuffer(
        capacity=CONFIG['replay_buffer_capacity'])

    # epsilon=0: always act greedily; this agent is only evaluated, not trained.
    explorer = chainerrl.explorers.ConstantEpsilonGreedy(
        epsilon=0, random_action_func=env.action_space.sample)

    agent = chainerrl.agents.DQN(
        q_func,
        optimizer,
        replay_buffer,
        CONFIG['gamma'],
        explorer,
        gpu=CONFIG['gpu_id'],
        replay_start_size=CONFIG['replay_start_size'],
        update_interval=CONFIG['update_interval'],
        target_update_interval=CONFIG['target_update_interval'])

    agent.load(CONFIG['resultdir_path'] + '/best')
    actions = defaultdict(int)

    # Roll out up to 100 episodes (capped at 100 environment steps in total)
    # and record the final IoU of every finished episode.
    with open('iou.txt', 'w') as f:
        i = 0
        for j in range(100):
            obs = env.reset()
            done = False
            while (not done) and i < 100:
                action = agent.act(obs)
                actions[ACTION_MEANINGS[action]] += 1
                obs, reward, done, info = env.step(action)

                print(ACTION_MEANINGS[action], reward, done, info)
                if done:
                    f.write(f'{env.iou}\n')
                i += 1
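
A short follow-up sketch (not part of the original example) that averages the recorded IoU values after the run:

import numpy as np

# One IoU value per finished episode was written to iou.txt above.
ious = np.atleast_1d(np.loadtxt('iou.txt'))
if ious.size:
    print(f'episodes finished: {ious.size}, mean IoU: {ious.mean():.3f}')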
Example #3
def run():
    # Launch the Qt application and show the viewer wrapping a fresh CustomModel.
    app = QApplication(sys.argv)
    tool = CustomViewer(CustomModel())
    tool.show()
    tool.activateWindow()
    sys.exit(app.exec_())
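
A typical entry-point guard for launching the viewer when the module is executed as a script; a minimal sketch:

if __name__ == '__main__':
    run()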
def main():
    print_config()

    relative_paths = np.loadtxt(CONFIG['imagefile_path'], dtype=str)
    images_base_path = os.path.dirname(CONFIG['imagefile_path'])
    absolute_paths = [images_base_path + i.strip('.') for i in relative_paths]
    bboxes = np.load(CONFIG['boxfile_path'], allow_pickle=True)

    env = TextLocEnv(absolute_paths, bboxes, CONFIG['gpu_id'])

    n_actions = env.action_space.n
    q_func = chainerrl.q_functions.SingleModelStateQFunctionWithDiscreteAction(
        CustomModel(n_actions))
    if CONFIG['gpu_id'] != -1:
        q_func = q_func.to_gpu(CONFIG['gpu_id'])

    # Use Adam to optimize q_func; eps and the learning rate are taken from the
    # config (a larger eps improves numerical stability).
    optimizer = chainer.optimizers.Adam(eps=CONFIG['epsilon'],
                                        amsgrad=True,
                                        alpha=CONFIG['learning_rate'])
    optimizer.setup(q_func)

    # Use epsilon-greedy for exploration
    explorer = chainerrl.explorers.LinearDecayEpsilonGreedy(
        start_epsilon=CONFIG['start_epsilon'],
        end_epsilon=CONFIG['end_epsilon'],
        decay_steps=CONFIG['decay_steps'],
        random_action_func=env.action_space.sample)

    # DQN uses Experience Replay.
    # Specify a replay buffer and its capacity.
    replay_buffer = chainerrl.replay_buffer.EpisodicReplayBuffer(
        capacity=CONFIG['replay_buffer_capacity'])

    # Now create an agent that will interact with the environment.
    agent = chainerrl.agents.DQN(
        q_func,
        optimizer,
        replay_buffer,
        CONFIG['gamma'],
        explorer,
        gpu=CONFIG['gpu_id'],
        replay_start_size=CONFIG['replay_start_size'],
        update_interval=CONFIG['update_interval'],
        target_update_interval=CONFIG['target_update_interval'])

    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='')

    eval_run_count = 10

    # Timestamp and (truncated) agent class name label the TensorBoard run and
    # the saved agent directory.
    timestr = time.strftime("%Y%m%d-%H%M%S")
    agentClassName = agent.__class__.__name__[:10]

    step_hooks = []
    logger = None
    if CONFIG['use_tensorboard']:
        writer = SummaryWriter("tensorboard/tensorBoard_exp_" + timestr + "_" +
                               agentClassName)
        step_hooks = [TensorBoardLoggingStepHook(writer)]
        handler = TensorBoardEvaluationLoggingHandler(writer, agent,
                                                      eval_run_count)
        logger = logging.getLogger()
        logger.addHandler(handler)

        gradients_weights_log_interval = 100
        optimizer.add_hook(
            TensorboardGradientPlotter(
                summary_writer=writer,
                log_interval=gradients_weights_log_interval))

    # save config file to results dir after initializing agent
    write_config()

    # Overwrite the normal evaluation method
    # chainerrl.experiments.evaluator.run_evaluation_episodes = run_localization_evaluation_episodes

    train_agent_with_evaluation(
        agent,
        env,
        steps=CONFIG['steps'],  # Total number of training steps
        eval_n_episodes=CONFIG['eval_n_episodes'],  # Episodes sampled per evaluation
        eval_n_steps=None,
        train_max_episode_len=CONFIG['train_max_episode_len'],  # Maximum length of each episode
        eval_interval=CONFIG['eval_interval'],  # Evaluate the agent every this many steps
        outdir=CONFIG['resultdir_path'],  # Save everything to this directory
        step_hooks=step_hooks,
        logger=logger)

    agent.save('agent_' + timestr + "_" + agentClassName)
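
The snapshot written by agent.save can later be reloaded for a greedy rollout; a minimal sketch, assuming the agent, env, timestr, and agentClassName variables from main() are still in scope:

# Restore the final snapshot into the identically constructed agent.
agent.load('agent_' + timestr + '_' + agentClassName)

obs = env.reset()
done = False
while not done:
    # Greedy action from the restored Q-function.
    obs, reward, done, info = env.step(agent.act(obs))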
# Initialize firebase
cred = credentials.Certificate("firebase.json")
firebase_admin.initialize_app(cred)

store = firestore.client()
snake_species_ref = store.collection(u'snake-species')
authority_data_ref = store.collection(u'authority-data')

with open('class_index.json') as f:
    class_index = json.load(f)

# Open and load model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

state = torch.load("custom_model.pth", map_location=device)
model = CustomModel(5)
model.load_state_dict(state)
model = model.to(device)

state_loc = torch.load("custom_model_with_location.pth", map_location=device)
model_loc = CustomModelLoc(5)
model_loc.load_state_dict(state_loc)
model_loc = model_loc.to(device)

# switch to `eval` mode for testing
model.eval()
model_loc.eval()

# ImageNet mean/std values used to normalize input images
mean_nums = [0.485, 0.456, 0.406]
std_nums = [0.229, 0.224, 0.225]
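
A minimal preprocessing sketch built from the normalization values above; the 256/224 resize-and-crop sizes and the predict_class helper are assumptions, not part of the original code:

from PIL import Image
from torchvision import transforms

# Standard resize / center-crop / normalize pipeline using the values above.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean_nums, std=std_nums),
])

def predict_class(image_path):
    # Run a single image through the plain (no-location) model and return the
    # predicted class index, to be looked up in class_index.
    image = Image.open(image_path).convert('RGB')
    batch = preprocess(image).unsqueeze(0).to(device)
    with torch.no_grad():
        logits = model(batch)
    return int(logits.argmax(dim=1))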
Example #6
def load_model(input_dir):
    return CustomModel(input_dir)