def run_cnn(conf):

    x_train, y_train, x_test, y_test = data(conf)

    network = CNN(conf)
    model = network.build()
    model.summary()

    if conf["subnetwork"]["use_subnet"]:
        print("Number of subnetworks: {}".format(comp_num_subnets(model)))

    # Save an architecture diagram of the model
    tf.keras.utils.plot_model(model,
                              conf["paths"]["results"] + "model.png",
                              show_shapes=True,
                              show_layer_names=True,
                              expand_nested=True)

    model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  optimizer=tf.keras.optimizers.Adam(learning_rate=conf["training"]["learning_rate"]),
                  metrics=['accuracy'])

    # Light data augmentation applied to the training batches
    datagen = tf.keras.preprocessing.image.ImageDataGenerator(horizontal_flip=True,
                                                              rotation_range=10.0,
                                                              width_shift_range=0.2,
                                                              height_shift_range=0.2,
                                                              shear_range=10.0,
                                                              zoom_range=0.1)

    # Create timestamped output directories for TensorBoard stats and checkpoints
    date_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    appendix = "1" if conf["subnetwork"]["use_subnet"] else "0"
    stats_dir = conf["paths"]["stats"] + date_time + "_" + appendix
    Path(stats_dir).mkdir(parents=True, exist_ok=True)
    model_dir = conf["paths"]["model"] + date_time + "_" + appendix
    Path(model_dir).mkdir(parents=True, exist_ok=True)

    callback_tensorboard = tf.keras.callbacks.TensorBoard(log_dir=stats_dir, histogram_freq=1)
    callback_checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=model_dir + "/model.ckpt",
                                                             save_weights_only=True,
                                                             monitor='val_accuracy',
                                                             mode='max',
                                                             save_best_only=True,
                                                             save_freq='epoch',
                                                             verbose=1)

    batch_size = conf["training"]["batch_size"]
    epochs = conf["training"]["epochs"]
    # Train on augmented batches; validation runs on the raw test arrays,
    # so validation_steps (which only applies to generators/datasets) is dropped.
    model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
              steps_per_epoch=len(x_train) // batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              callbacks=[callback_tensorboard, callback_checkpoint],
              verbose=2)

    tf.keras.backend.clear_session()
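
For reference, a minimal sketch of the conf dictionary that run_cnn reads, inferred only from the keys accessed above; all values are illustrative placeholders, and CNN(conf) and data(conf) presumably consume further keys not shown here.

# Hypothetical configuration; values are placeholders, not project defaults.
conf = {
    "paths": {"results": "results/", "stats": "stats/", "model": "models/"},
    "subnetwork": {"use_subnet": True},
    "training": {"learning_rate": 1e-3, "batch_size": 64, "epochs": 10},
}
run_cnn(conf)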
Example no. 2
def train_test(config, logger):

    tot_acc = 0
    tot_prec = 0
    tot_rec = 0
    tot_f1 = 0
    tot_auc = 0

    total_start_time = time.time()
    # Training for each fold
    for i in range(0, config.kfold):
        # To match the output filenames
        k = str(i + 1)

        # Load data iterator
        dataloader = Dataloader(config, k)

        # Load model
        arch = LSTM(config, dataloader).to(config.device)
        if config.model == 'cnn':
            arch = CNN(config, dataloader).to(config.device)

        # Print network configuration
        logger.info(arch)

        # Trainer
        model = Trainer(config, logger, dataloader, arch, k)

        # Train
        if not config.eval:
            logger.info("**************Training started !!!**************\n")
            logger.info("Starting training on {0}th-fold".format(k))
            model.fit()

        # Test
        logger.info("**************Testing Started !!!**************\n")
        model.load_checkpoint()
        acc, prec, rec, f1, auc = model.predict()
        logger.info(
            "Accuracy: %6.3f Precision: %6.3f Recall: %6.3f FB1: %6.3f AUC: %6.3f"
            % (acc, prec, rec, f1, auc))
        logger.info("***********************************************\n")

        # Accumulate the metrics across folds
        tot_acc += acc
        tot_prec += prec
        tot_rec += rec
        tot_f1 += f1
        tot_auc += auc

    total_end_time = time.time()

    # Display final results
    total_mins, total_secs = utilities.epoch_time(total_start_time,
                                                  total_end_time)
    logger.info("Total time: %dm %ds" % (total_mins, total_secs))
    logger.info(
        "Final_Accuracy;%6.3f;Final_Precision;%6.3f;Final_Recall;%6.3f;Final_FB1;%6.3f;Final_AUC;%6.3f "
        % (tot_acc / config.kfold, tot_prec / config.kfold, tot_rec /
           config.kfold, tot_f1 / config.kfold, tot_auc / config.kfold))
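
utilities.epoch_time is not shown in this snippet; a plausible implementation, assuming it simply splits an elapsed interval into whole minutes and seconds:

def epoch_time(start_time, end_time):
    # Split elapsed wall-clock seconds into whole minutes and seconds.
    elapsed = end_time - start_time
    mins = int(elapsed // 60)
    secs = int(elapsed - mins * 60)
    return mins, secs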
Example no. 3
    def test_cnn(self):
        num_actions = 4
        agent_history_length = 4
        frame_width = 84
        frame_height = 84
        cnn = CNN(num_actions, agent_history_length, frame_width, frame_height)
        s_shape = cnn.s.get_shape().as_list()
        self.assertEqual(
            s_shape, [None, agent_history_length, frame_width, frame_height])
        q_values_shape = cnn.q_values.get_shape().as_list()
        self.assertEqual(q_values_shape, [None, num_actions])
        plot_model(cnn.model,
                   show_shapes=True,
                   show_layer_names=True,
                   to_file='model.png')
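
The test only assumes that the CNN wrapper exposes s (the input tensor), q_values (the output tensor), and a Keras model. A hypothetical reconstruction that satisfies those shape assertions; the layer choices are illustrative, not the original architecture, and the original presumably targets TF1, where model.input is a plain tf.Tensor with get_shape():

import tensorflow as tf

class CNN:
    def __init__(self, num_actions, agent_history_length, frame_width, frame_height):
        # Channels-first state input: (history, width, height)
        inputs = tf.keras.Input(shape=(agent_history_length, frame_width, frame_height))
        x = tf.keras.layers.Permute((2, 3, 1))(inputs)  # to channels-last for Conv2D
        x = tf.keras.layers.Conv2D(32, 8, strides=4, activation="relu")(x)
        x = tf.keras.layers.Flatten()(x)
        outputs = tf.keras.layers.Dense(num_actions)(x)  # one Q-value per action
        self.model = tf.keras.Model(inputs, outputs)
        self.s = self.model.input
        self.q_values = self.model.output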
Example no. 4
    def test(self):
        """
        mainメソッド.
        学習後のモデルをテストする.
        """
        # Q-Network
        q_func = CNN(self.env.action_space.n, self.history_len, self.width,
                     self.height)
        # Build the session
        sess = tf.InteractiveSession()
        # Restore the saved session
        restore_sess(sess, self.save_network_path)
        # Initialize the agent
        agent = DQNTestAgent(num_actions=self.env.action_space.n,
                             q_func=q_func)

        # Main loop
        t = 0
        episode = 0
        while t < self.tmax:
            # Run one episode
            episode += 1
            duration = 0
            total_reward = 0.0
            done = False
            # Reset the environment
            obs = self.env.reset()
            # Run until the episode ends
            while not done:
                # Select an action
                action = agent.action(t, obs)
                # Execute the action; observe the reward, next observation, and done flag
                obs, reward, done, info = self.env.step(action)
                self.env.render()
                total_reward += reward
                t += 1
                duration += 1
            print(
                'EPISODE: {0:6d} / TIME_STEP: {1:8d} / DURATION: {2:5d} / TOTAL_REWARD: {3:3.0f}'
                .format(episode, t, duration, total_reward))
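
The loop above uses the classic four-tuple Gym step API. Newer Gym/Gymnasium releases split done into terminated and truncated; a small compatibility shim, assuming either API may be present:

def step_compat(env, action):
    # Normalize env.step() across the old 4-tuple and new 5-tuple Gym APIs.
    result = env.step(action)
    if len(result) == 5:  # Gymnasium / gym >= 0.26
        obs, reward, terminated, truncated, info = result
        return obs, reward, terminated or truncated, info
    return result  # classic (obs, reward, done, info)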
Example no. 5
def infer(config, logger):
    k = str(config.kfold)
    dataloader = Dataloader(config, k)

    # Load model
    arch = LSTM(config, dataloader).to(config.device)
    if config.model == 'cnn':
        arch = CNN(config, dataloader).to(config.device)

    # Print network configuration
    logger.info(arch)

    # Trainer
    model = Trainer(config, logger, dataloader, arch, k)

    model.load_checkpoint()

    logger.info("Inferred results")

    pred_tag = model.infer(config.txt, config.at, config.ac)

    print(config.txt + '\t' + config.at + '\t' + config.ac + '\t' + pred_tag +
          '\n')
Example no. 6
def initlize(config):
    if config["model_name"] == 'LSTM':
        model = lstm(config['input_dim'], config['hid_dim'],
                     config['n_layers'], config['drop'], config['bid'],
                     device).to(device)
    if config["model_name"] == 'RESNET':
        model = resnet18(in_dim=config['input_dim'],
                         num_classes=config['num_classes']).to(device)
    elif config["model_name"] == 'TCN':
        model = TCN(config['input_channels'], config['n_classes'],
                    config['num_channels'], config['kernel_size'],
                    config['drop']).to(device)
    elif config["model_name"] == 'CNN':
        model = CNN(config['input_dim'], config['out_dim'], config['fc_drop'],
                    device).to(device)
    elif config["model_name"] == 'VRNN':
        model = VRNN_model(config['input_dim'], config['hid_dim'],
                           config['z_dim'], config['n_layers'], config['drop'],
                           device).to(device)
    elif config["model_name"] == 'seq2seq':
        model = seq2seq(config['input_dim'], config['hid_dim'],
                        config['n_layers'], config['drop'], config['bid'],
                        device).to(device)
    return model
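
An illustrative config for initlize, mirroring the keys looked up above; the values are placeholders, and device plus the model classes are assumed to be defined at module level:

# Hypothetical config; only the keys read by the chosen branch matter.
config = {
    "model_name": "CNN",
    "input_dim": 128, "out_dim": 10, "fc_drop": 0.3,
    "hid_dim": 256, "n_layers": 2, "drop": 0.2, "bid": True,
    "num_classes": 10, "input_channels": 1, "n_classes": 10,
    "num_channels": [32, 64], "kernel_size": 3, "z_dim": 16,
}
model = initlize(config)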
Example no. 7
    def learn(self):
        """
        mainメソッド.
        DQNのアルゴリズムを回す.
        """
        # Replay Memory
        replay_mem = ReplayMemory(self.mem_size)

        # Q-Network
        q_func = CNN(self.env.action_space.n, self.history_len, self.width,
                     self.height)
        q_network_weights = q_func.model.trainable_weights  # weights to be trained
        # TargetNetwork
        target_func = CNN(self.env.action_space.n, self.history_len,
                          self.width, self.height)
        target_network_weights = target_func.model.trainable_weights  # list of target weights

        # Op that periodically syncs the target network with the Q-network
        assign_target_network = [
            target_network_weights[i].assign(q_network_weights[i])
            for i in range(len(target_network_weights))
        ]

        # Ops for the loss function and optimization
        a, y, loss, grad_update = self.build_training_op(
            self.env.action_space.n, q_func)

        # Build the session
        sess = tf.InteractiveSession()

        # Initialize variables (initializes the Q-network)
        sess.run(tf.global_variables_initializer())

        # Initialize the target network
        sess.run(assign_target_network)

        # Initialize the agent
        agent = DQNAgent(num_actions=self.env.action_space.n,
                         q_func=q_func,
                         schedule_time_steps=int(self.expl_frac * self.tmax),
                         initial_time_step=self.replay_st_size,
                         final_p=self.fin_expl)

        # Logger
        logger = Logger(sess, self.save_summary_path)

        t = 0
        episode = 0
        # Main loop
        while t < self.tmax:
            # Run one episode
            episode += 1
            duration = 0
            total_reward = 0.0
            total_q_max = 0.0
            total_loss = 0
            done = False
            # Reset the environment
            obs = self.env.reset()
            # Run until the episode ends
            while not done:
                # Save the previous observation
                pre_obs = obs.copy()
                # Select an action following the epsilon-greedy policy
                action = agent.action(t, obs)
                # Execute the action; observe the reward, next observation, and done flag
                obs, reward, done, info = self.env.step(action)
                # Add (s_t, a_t, r_t, s_{t+1}, done) to the replay memory
                replay_mem.add(pre_obs, action, reward, obs, done)
                if self.render:
                    self.env.render()
                if t > self.replay_st_size and t % self.learn_freq == 0:
                    # Train the Q-network
                    total_loss += self.train(sess, q_func, a, y, loss,
                                             grad_update, replay_mem,
                                             target_func)
                if t > self.replay_st_size and t % self.update_freq == 0:
                    # Update the target network
                    sess.run(assign_target_network)
                if t > self.replay_st_size and t % self.save_network_freq == 0:
                    save_sess(sess, self.save_network_path, t)
                total_reward += reward
                total_q_max += np.max(
                    q_func.q_values.eval(feed_dict={q_func.s: [obs]}))
                t += 1
                duration += 1
            if t >= self.replay_st_size:
                logger.write(sess, total_reward, total_q_max / float(duration),
                             duration, total_loss / float(duration), t,
                             episode)
            print(
                'EPISODE: {0:6d} / TIME_STEP: {1:8d} / DURATION: {2:5d} / EPSILON: {3:.5f} / TOTAL_REWARD: {4:3.0f} '
                '/ AVG_MAX_Q: {5:2.4f} / AVG_LOSS: {6:.5f}'.format(
                    episode, t, duration, agent.epsilon.value(t), total_reward,
                    total_q_max / float(duration),
                    total_loss / float(duration)))
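
learn() relies on TF1-style sessions (tf.InteractiveSession, tf.global_variables_initializer). Under TensorFlow 2 the same graph-mode code can still run via the compat shim; a minimal sketch, assuming a TF2 environment:

# Run legacy graph-mode code on TensorFlow 2 via the v1 compatibility layer.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore graph mode so InteractiveSession works

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())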
def plot_subnets_stats(conf):
    """Visualizes the graphs of subnetworks.

    Args:
        conf: Dictionary consisting of configuration parameters.
    """

    network_type = conf["network"]["type"]
    if network_type == "cnn":
        network = CNN(conf)
    elif network_type == "mlp":
        network = MLP(conf)

    model = network.build()

    _, _, x_test, _ = data(conf)
    x_test = x_test[:1000]

    # Get directories of all models
    dir_names = next(os.walk(conf["paths"]["model"]))[1]
    model_dirs = ["{}{}{}".format(conf["paths"]["model"], dir_name, "/model.ckpt")
                  for dir_name in dir_names if dir_name.split("/")[-1].endswith("1")]
    print("{} models found.".format(len(model_dirs)))

    # Determine avg and std of subnet activations
    subnets_statistics = list()
    for model_dir in model_dirs:
        model.load_weights(model_dir).expect_partial()
        stats = get_preactivation_statistics(model, x_test)
        subnets_statistics.append(stats)

    # Extract layer names
    layer_names = list(subnets_statistics[0].keys())
    print("{} subnets found.".format(len(layer_names)))

    stats = {layer_name: {"avg": None, "std": None} for layer_name in layer_names}

    # Compute average stats of all subnets
    for layer_name in layer_names:
        avg = 0.0
        std = 0.0
        for subnet in subnets_statistics:
            avg += subnet[layer_name]["avg"]
            std += subnet[layer_name]["std"]
        stats[layer_name]["avg"] = avg / len(subnets_statistics)
        stats[layer_name]["std"] = std / len(subnets_statistics)

    # Extract graphs from subnets from all models
    graphs_dict = {layer_name: {"x": list(), "y": list()} for layer_name in layer_names}
    for model_dir in model_dirs:
        model.load_weights(model_dir).expect_partial()
        graphs = get_subnet_graphs(model, stats)
        for layer_name in layer_names:
            graphs_dict[layer_name]["x"].append(graphs[layer_name]["x"])
            graphs_dict[layer_name]["y"].append(graphs[layer_name]["y"])

    stats = {layer_name: {"x": None, "y_avg": None, "y_std": None} for layer_name in layer_names}

    for layer_name in layer_names:
        stats[layer_name]["x"] = np.mean(graphs_dict[layer_name]["x"], axis=0)  # not necessary
        stats[layer_name]["y_avg"] = np.mean(graphs_dict[layer_name]["y"], axis=0)
        stats[layer_name]["y_std"] = np.std(graphs_dict[layer_name]["y"], axis=0)

    # Plot results
    nrows = 2
    ncols = 4
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(2*ncols, 2*nrows))
    for i, (layer_name, ax) in enumerate(zip(layer_names, axes.flatten())):
        x = stats[layer_name]["x"]
        y_avg = stats[layer_name]["y_avg"]
        y_std = stats[layer_name]["y_std"]
        x_all = np.squeeze(graphs_dict[layer_name]["x"]).T
        y_all = np.squeeze(graphs_dict[layer_name]["y"]).T
        ax.plot(x_all, y_all, linewidth=0.3, alpha=0.4, color="green")
        ax.plot(x, y_avg, linewidth=1.0, color="green")
        ax.fill_between(x, y_avg - y_std, y_avg + y_std, alpha=0.2, color="green")
        ax.grid(True, alpha=0.5, linestyle='dotted')
        ax.set_title("$s^{{({layer})}}$".format(layer=str(i+1)))
    plt.tight_layout()
    plt.savefig('{}subnetworks_{}.png'.format(conf["paths"]["results"], "test"), dpi=100)
    plt.close(fig)
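
A hypothetical driver for plot_subnets_stats, assuming the configuration lives in a YAML file with the paths/network keys used above; the filename is a placeholder:

import yaml

# Load the configuration and render the subnetwork plots.
with open("config.yml") as f:
    conf = yaml.safe_load(f)
plot_subnets_stats(conf)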