Example #1
import os

from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam

# Project-local imports (module paths assumed from context): the config,
# the data loader, the CMTL model, and the custom mae/mse metrics.
import cfg
from data_loader import DataLoader
from model import CMTL, mae, mse


def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    dataset = args.dataset  # 'A' or 'B'

    train_path = cfg.TRAIN_PATH.format(dataset)
    train_gt_path = cfg.TRAIN_GT_PATH.format(dataset)
    val_path = cfg.VAL_PATH.format(dataset)
    val_gt_path = cfg.VAL_GT_PATH.format(dataset)

    train_data_loader = DataLoader(train_path,
                                   train_gt_path,
                                   shuffle=True,
                                   gt_downsample=False)
    val_data_loader = DataLoader(val_path,
                                 val_gt_path,
                                 shuffle=False,
                                 gt_downsample=False)
    # Load the data
    train_X, train_Y_den, train_Y_class = train_data_loader.load_all()
    val_X, val_Y_den, val_Y_class = val_data_loader.load_all()
    class_weights = train_data_loader.get_class_weights()

    # Define and compile the model
    input_shape = (None, None, 1)
    model = CMTL(input_shape)
    adam = Adam(lr=0.00001)
    loss = {'density': 'mse', 'cls': 'categorical_crossentropy'}
    loss_weights = {'density': 1.0, 'cls': 0.0001}
    print('[INFO] Compiling model ...')
    model.compile(optimizer=adam,
                  loss=loss,
                  loss_weights=loss_weights,
                  metrics={
                      'density': [mae, mse],
                      'cls': 'accuracy'
                  })

    # Define callbacks
    checkpointer_best_train = ModelCheckpoint(
        filepath=os.path.join(cfg.MODEL_DIR,
                              'mcnn_' + dataset + '_train.hdf5'),
        monitor='loss',
        verbose=1,
        save_best_only=True,
        mode='min')
    callback_list = [checkpointer_best_train]

    # Optional: random data augmentation
    # print('[INFO] Random data augment ...')
    # train_X, train_Y_den = train_data_loader.random_augment(train_X, train_Y_den)
    # Train
    print('[INFO] Training Part_{} ...'.format(dataset))
    model.fit(train_X,
              {'density': train_Y_den, 'cls': train_Y_class},
              validation_data=(val_X,
                               {'density': val_Y_den, 'cls': val_Y_class}),
              batch_size=cfg.TRAIN_BATCH_SIZE,
              epochs=cfg.EPOCHS,
              callbacks=callback_list,
              class_weight={'cls': class_weights})
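
Both examples take an args object with a dataset attribute but omit the entry point. Below is a minimal sketch of the CLI wrapper they imply, using argparse; the flag name and its choices are assumptions, not taken from the original project.

import argparse

if __name__ == '__main__':
    # Hypothetical entry point: exposes the dataset part ('A' or 'B')
    # that main() reads from args.dataset.
    parser = argparse.ArgumentParser(
        description='Train or test on dataset Part_A or Part_B')
    parser.add_argument('--dataset', choices=['A', 'B'], default='A',
                        help="dataset part to use: 'A' or 'B'")
    main(parser.parse_args())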
Example #2
import os

import numpy as np
import tensorflow as tf
from keras.models import load_model

# Project-local imports (module paths assumed from context).
import cfg
from data_loader import DataLoader


def main(args):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    dataset = args.dataset  # 'A' or 'B'
    model_path = './trained_models/mcnn_{}_train.hdf5'.format(dataset)

    output_dir = './output_{}/'.format(dataset)
    heatmaps_dir = os.path.join(output_dir, 'heatmaps')  # directory to save heatmaps
    results_txt = os.path.join(output_dir, 'results.txt')  # file to save predicted results
    for _dir in [output_dir, heatmaps_dir]:
        if not os.path.exists(_dir):
            os.mkdir(_dir)

    test_path = cfg.TEST_PATH.format(dataset)
    test_gt_path = cfg.TEST_GT_PATH.format(dataset)
    # load test set
    data_loader = DataLoader(test_path,
                             test_gt_path,
                             shuffle=False,
                             gt_downsample=True)
    _ = data_loader.load_all()  # populates the loader so it can be iterated below
    # load model
    print('[INFO] Load model ...')
    model = load_model(model_path, custom_objects={'tf': tf})

    # test
    print('[INFO] Testing Part_{} ...'.format(dataset))
    mae = 0.0
    mse = 0.0
    acc = 0.0
    for blob in data_loader:
        img = blob['data']
        gt_den = blob['gt_den']
        gt_cls = np.argmax(blob['gt_class'])
        pred_den, pred_cls = model.predict(np.expand_dims(img, axis=0))
        if np.argmax(pred_cls[0]) == gt_cls:
            acc += 1
        gt_count = np.sum(gt_den)
        pred_count = np.sum(pred_den)
        mae += abs(gt_count - pred_count)
        mse += (gt_count - pred_count) ** 2
        # # create and save heatmap
        # pred = np.squeeze(pred)  # shape(1, h, w, 1) -> shape(h, w)
        # save_heatmap(pred, blob, test_path, heatmaps_dir)
        # save results
        with open(results_txt, 'a') as f:
            line = '<{}> {:.2f}--{:.2f}\t{}--{}\n'.format(
                blob['fname'].split('.')[0], gt_count, pred_count, gt_cls,
                np.argmax(pred_cls[0]))
            f.write(line)

    mae = mae / data_loader.num_samples
    mse = np.sqrt(mse / data_loader.num_samples)
    acc = acc / data_loader.num_samples
    print('[RESULT] MAE: %0.2f, MSE: %0.2f, Acc: %0.2f' % (mae, mse, acc))
    with open(results_txt, 'a') as f:
        f.write('MAE: %0.2f, MSE: %0.2f, Acc: %0.2f\n' % (mae, mse, acc))
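
A note on load_model above: custom_objects={'tf': tf} is needed because the saved model contains Lambda layers whose bodies reference the global name tf; that name is not stored in the HDF5 file, so it must be supplied when the model is deserialized. A minimal sketch of the pattern with a toy model (the architecture here is an assumed illustration, not CMTL):

import tensorflow as tf
from keras.layers import Input, Lambda
from keras.models import Model, load_model

# The Lambda body refers to the global name tf, which Keras cannot
# serialize into the HDF5 file.
inp = Input(shape=(4,))
out = Lambda(lambda x: tf.nn.relu(x))(inp)
Model(inp, out).save('toy_lambda.hdf5')

# Without custom_objects this load raises NameError: name 'tf' is not
# defined; supplying the mapping restores it.
model = load_model('toy_lambda.hdf5', custom_objects={'tf': tf})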