Example #1
    def execute(self):
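        # fit the model, save its artifacts (weights, graph/info, class map, history plots),
        # then report the best validation loss before and after fine-tuning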
        model, cls_info, history, ft_history = self.fit_model()

        utils.mkdir(self.dst_dir, rm=True)
        model.save(self.est_file)

        mutils.save_model_info(self.info_file, self.graph_file, model)

        with open(self.cls_file, 'wb') as f:
            pickle.dump(cls_info, f)
        print(f'Classes: {cls_info}')

        utils.plot(history, self.hist_file)
        utils.plot(ft_history, self.ft_hist_file)

        def get_min(loss):
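            # return the smallest loss value and its index in the history list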
            min_val = min(loss)
            min_ind = loss.index(min_val)
            return min_val, min_ind

        print('Before fine-tuning')
        min_val, min_ind = get_min(history['val_loss'])
        print(f'val_loss: {min_val} (Epochs: {min_ind + 1})')

        print('After fine-tuning')
        min_val, min_ind = get_min(ft_history['val_loss'])
        print(f'val_loss: {min_val} (Epochs: {min_ind + 1})')
Example #2
def test():
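    # evaluation loop: two agents play on random maps while win/loss and score stats accumulate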
    data = Data(cargs.min_size, cargs.max_size)
    env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)
    agent = [Agent(env, args[0]), Agent(env, args[1])]
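    # rolling windows: raw win/loss and score over the last 1k games, their running means over 10k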
    wl_mean, score_mean = [[deque(maxlen=10000),
                            deque(maxlen=10000)] for _ in range(2)]
    wl, score = [[deque(maxlen=1000), deque(maxlen=1000)] for _ in range(2)]
    cnt_w, cnt_l = 0, 0
    exp_rate = [args[0].exp_rate, args[1].exp_rate]
    # agent[0].model.load_state_dict(torch.load(checkpoint_path_1, map_location = agent[0].model.device))
    # agent[1].model.load_state_dict(torch.load(checkpoint_path_2, map_location = agent[1].model.device))

    for _ep in range(cargs.n_epochs):
        if _ep % 10 == 9:
            print('Testing_epochs: {}'.format(_ep + 1))
        done = False
        start = time.time()
        current_state = env.get_observation(0)
        for _iter in range(env.n_turns):
            if cargs.show_screen:
                env.render()
            """ initialize """
            actions, soft_state, soft_agent_pos, pred_acts, exp_rewards = \
                [[[], []] for i in range(5)]
            """ update by step """
            for i in range(env.num_players):
                soft_state[i] = env.get_observation(i)
                soft_agent_pos[i] = env.get_agent_pos(i)
                pred_acts[i], exp_rewards[i] = agent[i].select_action_smart(
                    soft_state[i], soft_agent_pos[i], env)
            """ select action for each agent """
            for agent_id in range(env.n_agents):
                for i in range(env.num_players):
                    ''' get state to forward '''
                    state_step = env.get_states_for_step(current_state)
                    agent_step = env.get_agent_for_step(
                        agent_id, i, soft_agent_pos)
                    ''' predict from model '''
                    if random.random() < exp_rate[i]:
                        act = pred_acts[i][agent_id]
                    else:
                        # print(i)
                        act = agent[i].get_action(state_step, agent_step)
                        # act, _, _ = agent[i].select_action(state_step, agent_step)
                    ''' convert state to opponent state '''
                    env.convert_to_opn_obs(current_state, soft_agent_pos)
                    ''' store training information '''
                    actions[i].append(act)
                ''' last action to fit next state '''
                acts = [actions[0][-1], actions[1][-1]]
                current_state, temp_rewards = env.soft_step_2(
                    agent_id, current_state, acts, soft_agent_pos)

            # actions[1] = [np.random.randint(0, env.n_actions - 1) for _ in range(env.n_agents)]
            # actions[1] = [0] * env.n_agents
            # actions[1] = pred_acts[1]
            current_state, final_reward, done, _ = env.step(
                actions[0], actions[1], cargs.show_screen)
            if done:
                score[0].append(env.players[0].total_score)
                score[1].append(env.players[1].total_score)
                if env.players[0].total_score > env.players[1].total_score:
                    cnt_w += 1
                else:
                    cnt_l += 1
                break

        end = time.time()

        wl[0].append(cnt_w)
        wl[1].append(cnt_l)
        for i in range(2):
            wl_mean[i].append(np.mean(wl[i]))
            score_mean[i].append(np.mean(score[i]))

        if _ep % 50 == 49:
            plot(wl_mean, vtype='Win')
            plot(score_mean, vtype='Score')
            print("Time: {0: >#.3f}s".format(1000 * (end - start)))
        env.soft_reset()
Example #3
def train():
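    # self-play training: each agent mixes scripted expert actions (exp_rate) with model actions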
    data = Data(cargs.min_size, cargs.max_size)
    env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)
    agent = [Agent(env, args[0], 'agent_1'), Agent(env, args[1], 'agent_2')]
    wl_mean, score_mean, l_val_mean = \
        [[deque(maxlen=10000), deque(maxlen=10000)] for _ in range(3)]
    wl, score, l_val = [[deque(maxlen=1000), deque(maxlen=1000)] for _ in range(3)]
    lr_super = [args[0].exp_rate, args[1].exp_rate]
    cnt_w, cnt_l = 0, 0
    # agent[0].model.load_state_dict(torch.load(checkpoint_path_1, map_location = agent[0].model.device))
    # agent[1].model.load_state_dict(torch.load(checkpoint_path_2, map_location = agent[1].model.device))
        
    for _ep in range(cargs.n_epochs):
        if _ep % 10 == 9:
            print('Training_epochs: {}'.format(_ep + 1))
        for _game in range(cargs.n_games):
            done = False
            start = time.time()
            for _iter in count():
                if cargs.show_screen:
                    env.render()
                    
                """ initialize """
                actions, state_vals, log_probs, rewards, soft_state, \
                    soft_agent_pos, pred_acts, exp_rewards = [[[], []] for i in range(8)]
                    
                """ update by step """
                for i in range(env.num_players):
                    soft_state[i] = env.get_observation(i)
                    soft_agent_pos[i] = env.get_agent_pos(i)
                    pred_acts[i], exp_rewards[i] = agent[i].select_action_smart(soft_state[i], soft_agent_pos[i], env)

                """ select action for each agent """
                for agent_id in range(env.n_agents):
                    for i in range(env.num_players):
                        agent_state = env.get_states_for_step(soft_state[i])
                        # unchanged
                        agent_step = env.get_agent_for_step(agent_id, soft_agent_pos)
                        act, log_p, state_val = 0, 0, 0
                        if random.random() < lr_super[i]:
                            act, log_p, state_val = agent[i].select_action_by_exp(
                                agent_state, agent_step, pred_acts[i][agent_id])
                        else:
                            act, log_p, state_val = agent[i].select_action(agent_state, agent_step)
                                
                        soft_state[i] = env.soft_step_(agent_id, soft_state[i], act, soft_agent_pos[i])
                        state_vals[i].append(state_val)
                        actions[i].append(act)
                        log_probs[i].append(log_p)
                # actions[1] = [np.random.randint(0, env.n_actions - 1) for _ in range(env.n_agents)]
                # actions[1] = [0] * env.n_agents
                # actions[1] = pred_acts[1]
                next_state, final_reward, done, _ = env.step(actions[0], actions[1], cargs.show_screen)
                for i in range(env.n_agents):
                    rewards[0].append(final_reward)
                    rewards[1].append(-final_reward)
                    for j in range(env.num_players):
                        if pred_acts[j][i] == actions[j][i]:
                            # blend the expert (scripted) reward into the final reward
                            reward = exp_rewards[j][i]
                            beta = 0.9
                            rewards[j][i] = rewards[j][i] * (1 - beta) + beta * reward
                        agent[j].model.store(log_probs[j][i], state_vals[j][i], rewards[j][i])
                if done:
                    score[0].append(env.players[0].total_score)
                    score[1].append(env.players[1].total_score)
                    if env.players[0].total_score > env.players[1].total_score:
                        cnt_w += 1
                    else:
                        cnt_l += 1
                    break
            agent[0].learn()
            agent[1].learn()
            end = time.time()
            if _ep > 3:
                l_val[0].append(agent[0].value_loss)
                l_val[1].append(agent[1].value_loss)
                wl[0].append(cnt_w)
                wl[1].append(cnt_l)
                for i in range(2):
                    wl_mean[i].append(np.mean(wl[i]))
                    score_mean[i].append(np.mean(score[i]))
                    l_val_mean[i].append(np.mean(l_val[i]))
            
            env.soft_reset()
        if _ep % 50 == 49:
            if cargs.visualize:
                plot(wl_mean, vtype='Win')
                plot(score_mean, vtype='Score')
                plot(l_val_mean, vtype='Loss_Value')
                print("Time: {:.3f}ms".format(1000 * (end - start)))
            if args[0].saved_checkpoint:
                agent[0].save_models()
                # torch.save(agent[0].model.state_dict(), checkpoint_path_1)
            if args[1].saved_checkpoint:
                agent[1].save_models()
                # torch.save(agent[1].model.state_dict(), checkpoint_path_2)
                # print('Completed episodes')
        # lr_super *= 0.999
        env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)
Example #4
def test():
    data = Data(cargs.min_size, cargs.max_size)
    env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)
    agent = [Agent(env, args[0], 'agent_1'), Agent(env, args[1], 'agent_2')]
    wl_mean, score_mean = [[deque(maxlen=10000), deque(maxlen=10000)] for _ in range(2)]
    wl, score = [[deque(maxlen=1000), deque(maxlen=1000)] for _ in range(2)]
    cnt_w, cnt_l = 0, 0
    # agent[0].model.load_state_dict(torch.load(checkpoint_path_1, map_location = agent[0].model.device))
    # agent[1].model.load_state_dict(torch.load(checkpoint_path_2, map_location = agent[1].model.device))
        
    for _ep in range(cargs.n_epochs):
        if _ep % 10 == 9:
            print('Testing_epochs: {}'.format(_ep + 1))
        done = False
        start = time.time()
        for _iter in count():
            if cargs.show_screen:
                env.render()
                
            """ initialize """
            actions, soft_state, soft_agent_pos = [[[], []] for i in range(3)]
                
            """ update by step """
            for i in range(env.num_players):
                soft_state[i] = env.get_observation(i)
                soft_agent_pos[i] = env.get_agent_pos(i)
               
            """ select action for each agent """
            for agent_id in range(env.n_agents):
                for i in range(env.num_players):
                    agent_state = env.get_states_for_step(soft_state[i])
                    agent_step = env.get_agent_for_step(agent_id, soft_agent_pos[i])
                    act, log_p, state_val = agent[i].select_action(agent_state, agent_step)
                            
                    soft_state[i] = env.soft_step_(agent_id, soft_state[i], act, soft_agent_pos[i])
                    actions[i].append(act)
            # actions[1] = [np.random.randint(0, env.n_actions - 1) for _ in range(env.n_agents)]
            # actions[1] = [0] * env.n_agents
            # actions[1] = pred_acts[1]
            next_state, final_reward, done, _ = env.step(actions[0], actions[1], cargs.show_screen)
            if done:
                score[0].append(env.players[0].total_score)
                score[1].append(env.players[1].total_score)
                if env.players[0].total_score > env.players[1].total_score:
                    cnt_w += 1
                else:
                    cnt_l += 1
                break
            
        end = time.time()
            
        wl[0].append(cnt_w)
        wl[1].append(cnt_l)
        for i in range(2):
            wl_mean[i].append(np.mean(wl[i]))
            score_mean[i].append(np.mean(score[i]))
                
        if _ep % 50 == 49:
            plot(wl_mean, vtype='Win')
            plot(score_mean, vtype='Score')
            print("Time: {:.3f}ms".format(1000 * (end - start)))
        env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)
Example #5
import pandas as pd
from src.pc import pc
from src.utils import get_causal_chains, plot

if __name__ == '__main__':
    file_path = 'data/test.csv'
    image_path = 'data/result.png'

    data = pd.read_csv(file_path)
    n_nodes = data.shape[1]
    labels = data.columns.to_list()

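    # run the PC algorithm on sufficient statistics (correlation matrix C and sample size n)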
    p = pc(suff_stat={"C": data.corr().values, "n": data.shape[0]},
           verbose=True)

    # causal chains via DFS
    print(get_causal_chains(p, start=2, labels=labels))

    # plot the graph
    plot(p, labels, image_path)
Example #6
def run_fit_on_single_country(country, save=False, path=None):
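    # fit logistic curves to confirmed cases and deaths, then project them MAX_DAYS_AHEAD days out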
    data = utils.get_json_from_url(config.DATA_URL)

    df = pd.DataFrame(data[country])
    date = df['date'].values[-1]

    df = df[df['confirmed'] > config.MIN_CONFIRMED_CASES]

    df = df.reset_index(drop=True)

    future_dates = list(
        np.linspace(0, config.MAX_DAYS_AHEAD, num=config.MAX_DAYS_AHEAD))
    df_projected = pd.DataFrame(index=future_dates)

    x = np.arange(len(df), dtype=float)
    x_future = np.arange(len(df_projected), dtype=float)

    y = utils.scale(df['confirmed'].values)
    df['confirmed_fit'] = utils.fit_predict(x, y, utils.logistic)
    df = compute_all_derivatives(df, 'confirmed', times_pred=x)

    df_projected['confirmed_fit'] = utils.fit_predict(x,
                                                      y,
                                                      utils.logistic,
                                                      x_pred=x_future)
    df_projected = compute_all_derivatives(df_projected,
                                           'confirmed',
                                           times_pred=x_future)

    y = utils.scale(df['deaths'].values)
    df['deaths_fit'] = utils.fit_predict(x, y, utils.logistic)
    df = compute_all_derivatives(df, 'deaths', times_pred=x)

    df_projected['deaths_fit'] = utils.fit_predict(x,
                                                   y,
                                                   utils.logistic,
                                                   x_pred=x_future)
    df_projected = compute_all_derivatives(df_projected,
                                           'deaths',
                                           times_pred=x_future)

    fig, axs = plt.subplots(2, 2, figsize=(15, 8))
    x = df.index
    x_proj = df_projected.index
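    # 2x2 grid: logistic fits (with projections) on top, first derivatives below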

    utils.plot(x,
               df['confirmed_fit'],
               ax=axs[0, 0],
               points=df['confirmed'],
               title='confirmed',
               label='best fit',
               scatter_label='actual')
    utils.plot(x, df['first_dev_confirmed'], ax=axs[1, 0], title="f'(x)")

    utils.plot_projection(x_proj,
                          df_projected['confirmed_fit'],
                          ax=axs[0, 0],
                          label='projected')
    utils.plot_projection(x_proj,
                          df_projected['first_dev_confirmed'],
                          ax=axs[1, 0])

    utils.plot(x,
               df['deaths_fit'],
               ax=axs[0, 1],
               points=df['deaths'],
               title='deaths')
    utils.plot(x, df['first_dev_deaths'], ax=axs[1, 1], title="f'(x)")

    utils.plot_projection(x_proj, df_projected['deaths_fit'], ax=axs[0, 1])
    utils.plot_projection(x_proj,
                          df_projected['first_dev_deaths'],
                          ax=axs[1, 1])

    axs[0, 0].legend(loc=(0.8, 0.1))
    fig.autofmt_xdate()
    fig.text(0.5,
             0.01,
             f'days since {config.MIN_CONFIRMED_CASES} confirmed cases',
             ha='center')
    plt.suptitle(f"Country: {country}. Last update: {date}")

    plt.show()
    if save:
        path = path or os.path.join(config.SRC_PATH,
                                    f'../examples/{country.lower()}.png')
        fig.savefig(path, bbox_inches='tight')

    plt.close()
Example #7
File: train.py Project: ualsg/Roofpedia
def loop():
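    # end-to-end training: build model and optimizer, select a loss, then run the epoch loop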
    device = torch.device("cuda")

    if not torch.cuda.is_available():
        sys.exit("Error: CUDA requested but not available")

    # weighted values for the loss functions
    # TODO: add a helper that returns these weights seamlessly
    try:
        weight = torch.Tensor([1.513212, 10.147043])
    except KeyError:
        if model["opt"]["loss"] in ("CrossEntropy", "mIoU", "Focal"):
            sys.exit("Error: the selected loss function needs dataset weight values")

    # load the model
    net = UNet(num_classes)
    net = DataParallel(net)
    net = net.to(device)

    # define the optimizer
    optimizer = Adam(net.parameters(), lr=lr)

    # resume training
    if model_path:
        chkpt = torch.load(model_path, map_location=device)
        net.load_state_dict(chkpt["state_dict"])
        optimizer.load_state_dict(chkpt["optimizer"])

    # select the loss function; keep the default or experiment
    if loss_func == "CrossEntropy":
        criterion = CrossEntropyLoss2d(weight=weight).to(device)
    elif loss_func == "mIoU":
        criterion = mIoULoss2d(weight=weight).to(device)
    elif loss_func == "Focal":
        criterion = FocalLoss2d(weight=weight).to(device)
    elif loss_func == "Lovasz":
        criterion = LovaszLoss2d().to(device)
    else:
        sys.exit("Error: Unknown Loss Function value !")

    # load the data
    train_loader, val_loader = get_dataset_loaders(target_size, batch_size, dataset_path)
    history = collections.defaultdict(list)

    # training loop
    for epoch in range(num_epochs):

        print("Epoch: " + str(epoch + 1))
        train_hist = train(train_loader, num_classes, device, net, optimizer, criterion)
        
        val_hist = validate(val_loader, num_classes, device, net, criterion)
        
        print("Train loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}".format(
                train_hist["loss"], train_hist["miou"], target_type, train_hist["fg_iou"], train_hist["mcc"]))
        
        print("Validation loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}".format(
                 val_hist["loss"], val_hist["miou"], target_type, val_hist["fg_iou"], val_hist["mcc"]))
        
        for key, value in train_hist.items():
            history["train " + key].append(value)

        for key, value in val_hist.items():
            history["val " + key].append(value)

        if (epoch + 1) % 5 == 0:
            # the plotter uses history values directly, so no separate log is needed
            visual = "history-{:05d}-of-{:05d}.png".format(epoch + 1, num_epochs)
            plot(os.path.join(checkpoint_path, visual), history)
        
        if (epoch + 1) % 20 == 0:
            checkpoint = target_type + "-checkpoint-{:03d}-of-{:03d}.pth".format(epoch + 1, num_epochs)
            states = {"epoch": epoch + 1, "state_dict": net.state_dict(), "optimizer": optimizer.state_dict()}
            torch.save(states, os.path.join(checkpoint_path, checkpoint))
Example #8
    def plot(self, column_1_number, column_2_number):
        utils.plot(self.data_listed,
                   self.kmeans_algorithm.labels_,
                   column_1_number,
                   column_2_number,
                   title='K-means results visualization')
Example #9
def train():
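    # single-network self-play: one model picks actions for both players and stores per-player transitions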
    data = Data(args.min_size, args.max_size)
    env = Environment(data.get_random_map(), args.show_screen, args.max_size)
    bot = Agent(env, args)

    wl_mean, score_mean, l_val_mean, l_pi_mean = \
        [[deque(maxlen=10000), deque(maxlen=10000)] for _ in range(4)]
    wl, score, l_val, l_pi = [[deque(maxlen=1000),
                               deque(maxlen=1000)] for _ in range(4)]
    cnt_w, cnt_l = 0, 0
    # bot.model.load_state_dict(torch.load(checkpoint_path_1, map_location = bot.model.device))
    # agent[1].model.load_state_dict(torch.load(checkpoint_path_2, map_location = agent[1].model.device))

    for _ep in range(args.n_epochs):
        if _ep % 10 == 9:
            print('Training_epochs: {}'.format(_ep + 1))
        for _game in range(args.n_games):
            done = False
            start = time.time()
            current_state = env.get_observation(0)

            for _iter in range(env.n_turns):
                if args.show_screen:
                    env.render()
                """ initialize """
                actions, state_vals, log_probs, rewards, soft_agent_pos = [
                    [[], []] for i in range(5)
                ]
                """ update by step """
                for i in range(env.num_players):
                    soft_agent_pos[i] = env.get_agent_pos(i)
                """ select action for each agent """
                for agent_id in range(env.n_agents):
                    for i in range(env.num_players):
                        ''' get state to forward '''
                        state_step = env.get_states_for_step(current_state)
                        agent_step = env.get_agent_for_step(
                            agent_id, soft_agent_pos)
                        ''' predict from model '''
                        act, log_p, state_val = bot.select_action(
                            state_step, agent_step)
                        ''' convert state to opponent state '''
                        env.convert_to_opn_obs(current_state, soft_agent_pos)
                        ''' store training information '''
                        state_vals[i].append(state_val)
                        actions[i].append(act)
                        log_probs[i].append(log_p)
                    ''' last action to fit next state '''
                    acts = [actions[0][-1], actions[1][-1]]
                    current_state, temp_rewards = env.soft_step_2(
                        agent_id, current_state, acts, soft_agent_pos)
                    rewards[0].append(temp_rewards[0] - temp_rewards[1])
                    rewards[1].append(temp_rewards[1] - temp_rewards[0])

                current_state, final_reward, done, _ = env.step(
                    actions[0], actions[1], args.show_screen)
                for i in range(env.n_agents):
                    for j in range(env.num_players):
                        bot.model.store(j, log_probs[j][i], state_vals[j][i],
                                        rewards[j][i])

            # record the win/loss outcome of this game
            is_win = env.players[0].total_score > env.players[1].total_score
            if is_win:
                cnt_w += 1
            else:
                cnt_l += 1

            score[0].append(env.players[0].total_score)
            score[1].append(env.players[1].total_score)
            bot.learn()
            end = time.time()
            if _ep > 3:
                l_val[0].append(bot.value_loss)
                l_pi[0].append(bot.policy_loss)
                # wl[0].append(cnt_w)
                # wl[1].append(cnt_l)
                for i in range(2):
                    # wl_mean[i].append(np.mean(wl[i]))
                    score_mean[i].append(np.mean(score[i]))
                # only index 0 collects losses above; averaging the empty deque
                # at index 1 would yield NaN
                l_val_mean[0].append(np.mean(l_val[0]))
                l_pi_mean[0].append(np.mean(l_pi[0]))

            env.soft_reset()
        if _ep % 100 == 99:
            if args.visualize:
                # plot(wl_mean, vtype = 'Win')
                plot(score_mean, vtype='ScoreTrue')
                plot(l_val_mean, vtype='Loss_Value')
                plot(l_pi_mean, vtype='Loss_Policy')
                print("Time: {0: >#.3f}s".format(1000 * (end - start)))
            if args.saved_checkpoint:
                bot.save_models()
                # torch.save(bot.model.state_dict(), checkpoint_path_1)
                # print('Completed episodes')
        env = Environment(data.get_random_map(), args.show_screen,
                          args.max_size)
Example #10
File: forel.py Project: petukhovv/FOREL
    def plot(self, column_1_number, column_2_number):
        utils.plot(self.result, self.clusters,
                   column_1_number, column_2_number,
                   title='FOREL results visualization')