Example #1
def run_episode(self, sess):

        # Get feed_dict
        training_set = dataset.DataGenerator()
        coord_batch, dist_batch, input_batch = training_set.next_batch(self.batch_size, self.max_length, self.input_dimension)
        feed = {self.input_coordinates: coord_batch, self.input_description: input_batch}

        # Actor forward pass
        seq_input, permutation, seq_proba = sess.run([self.input_coordinates, self.positions, self.proba], feed_dict=feed)

        # Critic forward pass
        b_s = sess.run(self.prediction_c, feed_dict=feed)

        # Environment response
        trip, circuit_length, reward = sess.run([self.trip, self.distances, self.reward], feed_dict=feed)

        # Train step: on the very first step only the first objective is
        # optimized; afterwards both are run together
        if self.step == 0:
            loss1, train_step1 = sess.run([self.loss1, self.train_step1], feed_dict=feed)
        else:
            loss1, train_step1, loss2, train_step2 = sess.run([self.loss1, self.train_step1, self.loss2, self.train_step2], feed_dict=feed)

        self.step += 1

        # Decay the sampling temperature every 100 steps
        if self.step % 100 == 0:
            self.ptr.temperature *= self.temperature_decay

        return seq_input, permutation, seq_proba, b_s, trip, circuit_length, reward
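The temperature update at the end of run_episode is a plain exponential decay applied once every 100 steps. A minimal, self-contained sketch of the resulting schedule (the initial temperature of 1.0 and decay factor of 0.95 are hypothetical values, not taken from the example):

temperature = 1.0          # hypothetical starting value
temperature_decay = 0.95   # hypothetical decay factor
for step in range(1, 1001):
    if step % 100 == 0:    # same trigger as in run_episode
        temperature *= temperature_decay
print(temperature)         # 0.95 ** 10 ≈ 0.5987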
Example #2
def prepare_db(opt):
    print("Use %s dataset" % opt.dataset)

    if opt.dataset == 'cold':
        dir_name = "/train_std.txt"
        train_df = pd.read_csv(os.getcwd() + dir_name, header=None)
        train_df = train_df.iloc[1:, :]
        train_dataset = dataset.DataGenerator(train_df)
        dir_name = "/val_std.txt"
        eval_df = pd.read_csv(os.getcwd() + dir_name, header=None)
        eval_df = eval_df.iloc[1:, :]
        print(len(eval_df))
        eval_dataset = dataset.DataGenerator(eval_df)
        dir_name = "/test_std.txt"
        test_df = pd.read_csv(os.getcwd() + dir_name, header=None)
        test_df = test_df.iloc[1:, :]
        print(len(test_df))
        test_dataset = dataset.DataGenerator(test_df)
        return {'train': train_dataset, 'eval': eval_dataset,'test':test_dataset}
    else:
        raise NotImplementedError
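prepare_db reads each split with header=None and then drops the first row with iloc[1:, :], i.e. the header line is read as data and discarded afterwards. A small self-contained illustration of that idiom on an in-memory CSV:

import io
import pandas as pd

csv_text = "x,y\n1.0,2.0\n3.0,4.0\n"
df = pd.read_csv(io.StringIO(csv_text), header=None)  # header line becomes row 0
df = df.iloc[1:, :]                                   # drop it, keeping the data rows
print(len(df))  # 2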
Example #3
     logger.info("load weight:%s", args.weights_path)
     model.load_weights(args.weights_path, by_name=True)
     logger.info(model.summary())
 # ネットワーク構成を画像として保存
 utils.plot_model(model, './model.png', True, True)
 # レイヤの重み更新有無を確認
 for i, layer in enumerate(model.layers):
     if layer.__class__.__name__ == 'TimeDistributed':
         name = layer.layer.name
         trainable = layer.layer.trainable
     else:
         name = layer.name
         trainable = layer.trainable
     logger.info('%s %s:%s', i, name, trainable)
 # 学習、検証データ生成器の準備
 gen = dataset.DataGenerator(config)
 train_data_generator = gen.generate(os.path.join(args.data_dir, "train"),
                                     train_generator, train_discriminator)
 val_data_generator = gen.generate(os.path.join(args.data_dir, "val"),
                                   train_generator, train_discriminator)
 model_file_path = './nnmodel/glcic-stage{}-{}'.format(
     args.stage, '{epoch:02d}-{val_loss:.2f}.h5')
 callbacks = [
     keras.callbacks.TerminateOnNaN(),
     keras.callbacks.TensorBoard(log_dir='./tb_log',
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=False),
     # keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
     #                                   verbose=1,
     #                                   factor=0.7,
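The layer loop above has to special-case TimeDistributed because it is the wrapper, not the wrapped layer, that appears in model.layers. A minimal runnable illustration of the unwrapping (written against tf.keras; the example itself uses standalone Keras):

from tensorflow.keras import layers, models

model = models.Sequential([
    layers.TimeDistributed(layers.Dense(4), input_shape=(5, 8)),
    layers.LSTM(3, name='lstm'),
])

for i, layer in enumerate(model.layers):
    if layer.__class__.__name__ == 'TimeDistributed':
        # unwrap the wrapper to reach the inner layer
        name, trainable = layer.layer.name, layer.layer.trainable
    else:
        name, trainable = layer.name, layer.trainable
    print(i, name, trainable)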
Example #4
    # Hypothetical reconstruction of the truncated opening statement:
    # locate the saved best checkpoint via glob
    best_model_file = glob.glob(
        os.path.join(main_res_path, "models", "best.hdf5"))[0]
    model = keras.models.load_model(best_model_file)
    sess = keras.backend.get_session()

    save_images_path = ''
    if save_images:
        save_images_path = os.path.join(best_model_res_path, 'sample_images')
        os.mkdir(save_images_path)

    transfer = (model_type == 'vgg_16_transfer')
    num_channels = 3 if transfer else 1
    input_shape = (256, 256, num_channels)

    val_g = ds.DataGenerator(data_partition_path=os.path.join(
        data_partition_dir, 'val.npy'),
                             data_source_dir=data_source_dir,
                             dim=input_shape,
                             save_images=save_images,
                             save_images_path=save_images_path)

    val_preds = model.predict_generator(val_g)
    np.savetxt(os.path.join(best_model_res_path, 'val_preds.csv'), val_preds)

    # Note: shuffling is disabled and the batch size is 1, so the data yielded
    # by this generator follows the order of the records in test.npy
    test_g = ds.DataGenerator(data_partition_path=os.path.join(
        data_partition_dir, 'test.npy'),
                              data_source_dir=data_source_dir,
                              dim=input_shape,
                              save_images=save_images,
                              save_images_path=save_images_path)
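Because shuffling is off and the batch size is 1, each prediction row can be matched back to its entry in test.npy. A hedged sketch of that pairing (that test.npy holds an ordered array of sample identifiers, and the allow_pickle flag, are assumptions here):

import os
import numpy as np

# Assumption: test.npy stores the ordered sample identifiers used by test_g
test_entries = np.load(os.path.join(data_partition_dir, 'test.npy'),
                       allow_pickle=True)
test_preds = model.predict_generator(test_g)
for entry, pred in zip(test_entries, test_preds):
    print(entry, float(pred))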
Example #5
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=lr,
                                           decay=decay,
                                           momentum=0.9,
                                           nesterov=False),
                  metrics=['accuracy'])

    print("compiled model")
    sys.stdout.flush()

    # prepare the data generators
    train_g = ds.DataGenerator(data_partition_path=os.path.join(
        data_partition_dir, 'train.npy'),
                               data_source_dir=data_source_dir,
                               batch_size=batch_size,
                               dim=input_shape,
                               shuffle=True,
                               augmentation_flag=enable_augmentations,
                               save_images=save_images,
                               save_images_path=save_images_path)

    val_g = ds.DataGenerator(data_partition_path=os.path.join(
        data_partition_dir, 'val.npy'),
                             data_source_dir=data_source_dir,
                             dim=input_shape,
                             save_images=save_images,
                             save_images_path=save_images_path)

    # training callbacks
    csv_logger = CSVLogger(os.path.join(final_model_res_path, 'training.log'))
    # tensorboard_callback=keras.callbacks.TensorBoard()
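The example ends before the actual training call; a minimal sketch of how the prepared generators and the CSV logger would typically be wired into it (the epoch count is a hypothetical value):

# Hypothetical continuation: launch training with the prepared pieces
history = model.fit_generator(train_g,
                              validation_data=val_g,
                              epochs=50,  # hypothetical value
                              callbacks=[csv_logger])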