Example #1
def train(args_):
    """
    进行训练
    :return:
    """
    model = MSCNN((224, 224, 3))
    model.compile(optimizer=SGD(lr=3e-4, momentum=0.9), loss='mse')
    # load pretrained model
    if args_['pretrained'] == 'yes':
        model.load_weights('../models/best_model_weights.h5')
        print("load model from ../models/")

    callbacks = get_callbacks()

    # stream the data: only one batch is read into memory at a time
    batch_size = int(args_['batch'])
    if args_['dataset'] == 'malldataset':
        dataset = MallDataset()
        model.fit_generator(dataset.gen_train(batch_size, 224),
                            steps_per_epoch=dataset.get_train_num() // batch_size,
                            validation_data=dataset.gen_valid(batch_size, 224),
                            validation_steps=dataset.get_valid_num() // batch_size,
                            epochs=int(args_['epochs']),
                            callbacks=callbacks)
    elif args_['dataset'] == 'shanghaitechdataset':
        dataset = ShanghaitechDataset()
        model.fit_generator(dataset.gen_train(batch_size, 224),
                            steps_per_epoch=dataset.get_train_num() // batch_size,
                            validation_data=dataset.gen_valid(batch_size, 224),
                            validation_steps=dataset.get_valid_num() // batch_size,
                            epochs=int(args_['epochs']),
                            callbacks=callbacks)
    else:
        print('dataset not supported')
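
A note on the helper used above: get_callbacks() is project-specific and not shown in the excerpt. A minimal sketch of what such a helper might return in Keras follows; the checkpoint path, monitored metric, and patience values are assumptions, not the project's actual settings.

from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

def get_callbacks():
    # Hypothetical reconstruction: keep the best weights by validation loss,
    # stop early when it stalls, and decay the learning rate on plateaus.
    return [
        ModelCheckpoint('../models/best_model_weights.h5', monitor='val_loss',
                        save_best_only=True, save_weights_only=True),
        EarlyStopping(monitor='val_loss', patience=10),
        ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5),
    ]
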
Example #2
File: test.py Project: zzubqh/CrowdCount
def predict():
    import glob
    from tqdm import tqdm

    data_dir = '../dataset/test'
    VGG_Model = '../models/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
    Dense_Model = '../models/dense_model_weights.h5'
    Mscnn_Model = os.path.join(MODEL_PATH, 'mscnn_model_weights.h5')

    images = glob.glob(os.path.join(data_dir, '*.jpg'))
    res = []

    # density-level classification model
    dense_net = DenseLevelNet(VGG_Model, Dense_Model)
    dense_model = dense_net.model()
    dense_model.load_weights(Dense_Model, by_name=True)

    # exact head-count model
    crow_model = MSCNN((224, 224, 3))
    crow_model.load_weights(Mscnn_Model)

    for img_name in tqdm(images):
        try:
            img = imopen(img_name)
            img = np.expand_dims(img, axis=0)
            dense_prob = dense_model.predict(img)
            dense_level = np.argmax(dense_prob, axis=1)
            dense_level = dense_level[0]
            if dense_level == 0:
                crow_count = 0
            elif dense_level == 2:
                crow_count = 100
            else:
                dmap = crow_model.predict(img)
                dmap[dmap < 0.01] = 0
                dmap = np.squeeze(dmap, axis=-1)
                crow_count = int(np.sum(dmap))
                if crow_count > 100:
                    crow_count = 100
            res.append([os.path.split(img_name)[1], crow_count])
        except Exception as e:
            print(img_name, e)
            res.append([os.path.split(img_name)[1], -1])

    with open(r'crowdcount/result/dense_res.csv', 'w') as sw:
        for item in res:
            sw.write('{0},{1}\n'.format(item[0], item[1]))
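
predict() above depends on an imopen() helper that is not part of the excerpt. A plausible sketch, assuming the model expects a 224x224 input scaled to [0, 1], matching the preprocessing in Example #7:

import cv2

def imopen(img_path):
    # Hypothetical helper: read the image, resize to the model's 224x224
    # input, and scale pixel values to [0, 1].
    img = cv2.imread(img_path)
    img = cv2.resize(img, (224, 224))
    return img / 255.
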
Example #3
File: train.py Project: zhudaoruyi/MSCNN
def train(batch, epochs, size):
    """Train the model.

    Arguments:
        batch: Integer, The number of train samples per batch.
        epochs: Integer, The number of train iterations.
        size: Integer, image size.
    """
    if not os.path.exists('model'):
        os.makedirs('model')

    model = MSCNN((size, size, 3))

    opt = SGD(lr=1e-5, momentum=0.9, decay=0.0005)
    model.compile(optimizer=opt, loss='mse')

    lr = ReduceLROnPlateau(monitor='loss', min_lr=1e-7)

    indices = list(range(1500))
    train_ids, test_ids = train_test_split(indices, test_size=0.25)

    hist = model.fit_generator(
        generator(train_ids, batch, size),
        validation_data=generator(test_ids, batch, size),
        steps_per_epoch=len(train_ids) // batch,
        validation_steps=len(test_ids) // batch,
        epochs=epochs,
        callbacks=[lr])

    model.save_weights(os.path.join('model', 'final_weights.h5'))

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv(os.path.join('model', 'history.csv'), index=False, encoding='utf-8')
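
The generator(ids, batch, size) passed to fit_generator above is defined elsewhere in the project. A minimal sketch of the expected contract, yielding (image batch, density-map batch) tuples indefinitely; load_sample() is a hypothetical stand-in for the project's actual image and ground-truth loading:

import numpy as np

def generator(ids, batch, size):
    # Hypothetical sketch: Keras generators must loop forever.
    while True:
        np.random.shuffle(ids)
        for i in range(0, len(ids) - batch + 1, batch):
            imgs, dmaps = [], []
            for idx in ids[i:i + batch]:
                img, dmap = load_sample(idx, size)  # hypothetical loader
                imgs.append(img)
                dmaps.append(dmap)
            yield np.array(imgs), np.array(dmaps)
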
Example #4
File: test.py Project: zzubqh/CrowdCount
def test(args_):
    model = MSCNN((224, 224, 3))
    model_file = os.path.join(MODEL_PATH, 'mscnn_model_weights.h5')
    if os.path.exists(model_file):
        model.load_weights(model_file, by_name=True)
        samples, true_counts = get_samples_crowdataset(5)
        maps = []
        counts = []
        for sample in samples:
            dmap = model.predict(sample)
            dmap[dmap < 0.01] = 0
            dmap = np.squeeze(dmap, axis=-1)
            counts.append(int(np.sum(dmap)))
            maps.append(dmap)
        # print(counts)
        save_result(samples, maps, counts, args_, true_counts)
    else:
        print("please download model frist!")
Example #5
def test(args_):
    """
    测试模型效果
    :param args_:
    :return:
    """
    model = MSCNN((224, 224, 3))
    if os.path.exists('../models/best_model_weights.h5'):
        model.load_weights('../models/best_model_weights.h5')
        samples, true_counts = get_samples_shanghaitech(5)
        maps = []
        counts = []
        for sample in samples:
            dmap = np.squeeze(model.predict(sample), axis=-1)
            dmap = cv2.GaussianBlur(dmap, (15, 15), 0)
            counts.append(int(np.sum(dmap)))
            maps.append(dmap)
        save_result(samples, maps, counts, args_, true_counts)
    else:
        print("Sorry, cannot find model file in root_path/models/, please download my model or train your model")
Example #6
def train(args_):
    model = MSCNN((224, 224, 3))
    # model.compile(optimizer=Adam(lr=3e-4), loss=denseloss)
    model.compile(optimizer=Adam(lr=3e-4), loss='mse')
    # load pretrained model
    if args_['pretrained'] == 'yes':
        model.load_weights('../models/mscnn_model_weights.h5')
        print("load model success")

    callbacks = get_callbacks()

    batch_size = int(args_['batch'])
    dataset = CrowDataset()
    model.fit_generator(dataset.gen_train(batch_size, 224),
                        steps_per_epoch=dataset.get_train_num() // batch_size,
                        validation_data=dataset.gen_valid(batch_size, 224),
                        validation_steps=dataset.get_valid_num() // batch_size,
                        epochs=int(args_['epochs']),
                        callbacks=callbacks)
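
train() reads its settings from a dict-style args_. A minimal command-line entry point that would produce such a dict is sketched below; the flag names mirror the keys read above, but the defaults and the CLI itself are assumptions:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Defaults are illustrative; train() converts batch/epochs with int().
    parser.add_argument('--batch', default='16')
    parser.add_argument('--epochs', default='100')
    parser.add_argument('--pretrained', default='no', choices=['yes', 'no'])
    parser.add_argument('--dataset', default='malldataset')  # used in Example #1
    args_ = vars(parser.parse_args())
    train(args_)
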
Example #7
from sklearn import metrics  # assumed source of the metrics module used below

def eval(y_true, y_pred):  # signature assumed; the excerpt starts mid-function
    """Evaluate the predicted result.

    # Arguments
        y_true: List/ndarray, true data.
        y_pred: List/ndarray, predicted data.
    """
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)

    print('mae:%f' % mae)
    print('mse:%f' % mse)


if __name__ == '__main__':
    name = 'data\\mall_dataset\\frames\\seq_001600.jpg'
    #    name = 'data\\timg3.jpg'

    model = MSCNN((224, 224, 3))
    model.load_weights('model\\final_weights.h5')

    img = cv2.imread(name)
    img = cv2.resize(img, (224, 224))
    img = img / 255.
    img = np.expand_dims(img, axis=0)

    dmap = model.predict(img)[0][:, :, 0]
    dmap = cv2.GaussianBlur(dmap, (15, 15), 0)

    visualization(img[0], dmap)
    print('count:', int(np.sum(dmap)))
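
A usage sketch for the metric helper above; the counts are illustrative only:

y_true = [23, 31, 18]  # illustrative ground-truth head counts
y_pred = [25, 29, 20]  # illustrative predicted counts
eval(y_true, y_pred)   # prints mae and mse
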
Example #8
File: main.py Project: striver314/MSCNN
    window = int(args.steps * args.days)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    device_ids = range(torch.cuda.device_count())

    # Set the random seed manually for reproducibility.
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        if not args.cuda:
            print(
                "WARNING: You have a CUDA device, so you should probably run with --cuda"
            )
        else:
            torch.cuda.manual_seed(args.seed)
    Data = Data_utility(args.data, 0.6, 0.2, args.cuda, args.horizon, window,
                        args.days, args.steps, args.normalize)
    model = MSCNN(window, args.steps, args.skip, Data.train[0].shape[1],
                  args.C_nums, args.C_steps, args.cuda)
    criterion = nn.MSELoss(reduction='sum')
    evaluateL2 = nn.MSELoss(reduction='sum')
    evaluateL1 = nn.L1Loss(reduction='sum')
    best_val = np.inf
    if args.cuda:
        model.cuda()
        criterion = criterion.cuda()
        evaluateL1 = evaluateL1.cuda()
        evaluateL2 = evaluateL2.cuda()
        model = torch.nn.DataParallel(model)
    optim = Optim.Optim(
        model.parameters(),
        'adam',
        0.001,
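
The excerpt breaks off inside the Optim.Optim(...) call. For context, a typical training step with the objects constructed above might look like the sketch below; the (X, Y) batch iteration and the wrapper's step() method are assumptions in the style of LSTNet-derived code:

def train_epoch(model, batches, criterion, optim):
    # Hypothetical loop: one pass over (input, target) batches.
    model.train()
    total_loss = 0.0
    for X, Y in batches:
        model.zero_grad()
        output = model(X)
        loss = criterion(output, Y)
        loss.backward()
        optim.step()  # assumed wrapper method delegating to the inner optimizer
        total_loss += loss.item()
    return total_loss
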
Example #9
    pixel_num = trans_map.shape[0] * trans_map.shape[1]
    # select the 1% of pixels with the smallest transmission values
    fraction = 0.01
    # number of candidate pixels
    selected_num = max(int(pixel_num * fraction), 1)

    # sort all transmission values to find the cutoff for the darkest 1%
    trans_sorted = np.sort(trans_map.ravel())
    threshold = trans_sorted[selected_num - 1]

    # indices of the candidate pixels at or below the cutoff
    (row_idx,
     col_idx) = np.where(trans_map <= threshold)

    A = np.max(J[row_idx, col_idx])
    return A
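
The atmospheric light A estimated above is normally substituted back into the haze imaging model I = J*t + A*(1 - t) to recover the scene radiance. A sketch of that inversion step; the lower bound t0 on the transmission is the usual safeguard against division by near-zero values:

import numpy as np

def recover_radiance(I, trans_map, A, t0=0.1):
    # Invert the haze model: J = (I - A) / max(t, t0) + A.
    # Assumes I is a float HxWx3 image and trans_map is HxW.
    t = np.maximum(trans_map, t0)[..., np.newaxis]
    return (I - A) / t + A
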


if __name__ == '__main__':
    # build the model
    mscnn = MSCNN(batch_size, epochs, learning_rate)
    # load the training data
    (train_datas, val_datas,
     test_datas) = load_data_generator(TRAIN_PERCENTAGE, VAL_PERCENTAGE)

    if mode == 'train':
        # train
        mscnn.train_on_generator(train_datas, val_datas)
    elif mode == 'test':
        # test
        mscnn.test_on_generator(test_datas)