Example #1
import json

import numpy as np
import pandas as pd
from torch.utils.data import DataLoader

# roadDatasetInfer, TRAIN_DIR and TRAIN_BATCH_SIZE are defined elsewhere in the project.


def test_model(model):
    """Run the model over the training set and export two CSVs: one with the
    key-frame output per sequence and one with the mean output over all frames."""
    model.eval()
    pre_dict = {}   # frame file name -> model output
    save_dict = {}  # sequence id -> key-frame output
    all_dict = {}   # sequence id -> mean output over all frames
    sun_set = roadDatasetInfer(TRAIN_DIR)
    train_json = '/data4/mjx/gd/raw_data/amap_traffic_annotations_train.json'
    data_loaders = DataLoader(sun_set,
                              batch_size=TRAIN_BATCH_SIZE,
                              shuffle=False,
                              num_workers=4)
    for data in data_loaders:
        inputs, paths = data
        inputs = inputs.cuda()
        output = model(inputs)
        # Move the batch outputs to CPU as plain Python lists.
        output = output.detach().cpu().numpy().tolist()

        # Index each frame's output by its file name.
        for idx in range(len(output)):
            pre_dict[paths[idx].split('/')[-1]] = output[idx]
    with open(train_json) as f:
        submit = json.load(f)
    submit_annos = submit['annotations']
    for i in range(len(submit_annos)):
        submit_anno = submit_annos[i]
        imgId = submit_anno['id']
        key_frame = submit_anno['key_frame']
        all_res = []
        for name in submit_anno['frames']:
            try:
                name_pre = pre_dict[imgId + '_' + name['frame_name']]
                all_res.append(name_pre)
            except KeyError:
                # Frame missing from the predictions; skip it.
                continue
        if all_res:
            # Sequence-level feature: mean output over all frames.
            all_dict[imgId] = np.array(all_res).mean(axis=0)
            try:
                # Key-frame feature for this sequence.
                save_dict[imgId] = pre_dict[imgId + '_' + key_frame]
            except KeyError:
                continue

    df = pd.DataFrame.from_dict(save_dict,
                                orient='index',
                                columns=['P1', 'P2', 'P3'])
    df = df.reset_index().rename(columns={'index': 'map_id'})
    df.to_csv('feature/train_key_feature_seg_w.csv', index=False)

    all_df = pd.DataFrame.from_dict(all_dict,
                                    orient='index',
                                    columns=['ave_P1', 'ave_P2', 'ave_P3'])
    all_df = all_df.reset_index().rename(columns={'index': 'map_id'})
    all_df.to_csv('feature/train_all_feature_seg_w.csv', index=False)
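
A minimal call sketch for the exporter above; build_model() and the checkpoint path are placeholders, not part of the original code:

import torch

model = build_model()                                       # hypothetical model factory
model.load_state_dict(torch.load('checkpoints/seg_w.pth'))  # hypothetical checkpoint path
test_model(model.cuda())  # writes both CSVs under feature/
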
Example #2
    def train_infer(self, model, epoch):
        """Run inference on the test set, majority-vote a traffic status per sequence,
        write a submission JSON under self.sub_dir and log metrics against self.real_json."""
        rawLabelDir = '/data4/mjx/gd/raw_data/amap_traffic_annotations_test.json'
        image_datasets = roadDatasetInfer(self.test_dir)
        dataset_loaders = torch.utils.data.DataLoader(
            image_datasets,
            batch_size=self.test_batch_size,
            shuffle=False,
            num_workers=self.num_workers)
        model.eval()
        pre_result = []
        pre_name = []
        pre_dict = {}
        for data in dataset_loaders:
            inputs, paths = data
            inputs = inputs.cuda()
            outputs = model(inputs)
            # Predicted class index for each frame in the batch.
            _, preds = torch.max(outputs, 1)
            pre_result += preds.cpu().numpy().tolist()
            for frame in paths:
                pre_name.append(frame.split('/')[-1])
        assert len(pre_name) == len(pre_result)
        for idx in range(len(pre_result)):
            pre_dict[pre_name[idx]] = pre_result[idx]

        count_result = {'畅通': 0, '缓行': 0, '拥堵': 0}
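        # Traffic status labels: 畅通 (clear) / 缓行 (slow-moving) / 拥堵 (congested).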
        with open(rawLabelDir) as f:
            submit = json.load(f)
        submit_annos = submit['annotations']
        submit_result = []
        for i in range(len(submit_annos)):
            submit_anno = submit_annos[i]
            imgId = submit_anno['id']
            # Collect this sequence's per-frame predictions and take a majority vote.
            frame_name = [
                imgId + '_' + frame['frame_name'] for frame in submit_anno['frames']
            ]
            status_all = [pre_dict[name] for name in frame_name]
            status = max(status_all, key=status_all.count)
            submit['annotations'][i]['status'] = status

        submit_json = '{}/{}/{}_{}.json'.format(self.sub_dir, self.model_name,
                                                self.model_name, epoch)
        json_data = json.dumps(submit)
        with open(submit_json, 'w') as w:
            w.write(json_data)
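        # compare() and count() are helpers defined elsewhere in the project.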
        f_class, score, P, R, real_f1 = compare(submit_json, self.real_json)
        count_result = count(submit_json)
        count_result_real = count(self.real_json)
        self.logger.info("{} 第{} epoch 预测结果:{}".format(self.model_name, epoch,
                                                       count_result))
        self.logger.info("{} 预测结果:{}".format(self.real_json,
                                             count_result_real))
        self.logger.info("{} 和 {} 的 f1:{} 加权f1:{} ".format(
            self.model_name, self.real_json, f_class, real_f1))
        self.logger.info("{} 和 {} 的 Acc:{} Precision:{} Recall:{}".format(
            self.model_name, self.real_json, score, P, R))
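
A sketch of how this method might be driven per epoch; the trainer instance, train_one_epoch() and num_epochs are assumptions, since only the method itself is shown:

# Hypothetical per-epoch driver around train_infer().
for epoch in range(num_epochs):
    train_one_epoch(model)             # placeholder for the project's training step
    trainer.train_infer(model, epoch)  # writes <sub_dir>/<model_name>/<model_name>_<epoch>.json and logs metrics
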
Example #3
# Uses the same module-level imports and project constants as Example #1.
def test_model(model):
    """Extract the 2048-d frame features and write the per-sequence mean feature
    to to_dw.csv (save_dict collects key-frame features but is not exported here)."""
    model.eval()
    temp_dict = {}  # frame file name -> 2048-d feature vector
    save_dict = {}  # sequence id -> key-frame feature
    all_dict = {}   # sequence id -> mean feature over all frames
    sun_set = roadDatasetInfer(TRAIN_DIR)
    train_json = '/data4/mjx/GD-B/amap_traffic_final_train_0906.json'
    data_loaders = DataLoader(sun_set,
                              batch_size=TRAIN_BATCH_SIZE,
                              shuffle=False,
                              num_workers=4)
    for data in data_loaders:
        inputs, paths = data
        inputs = inputs.cuda()
        # The model returns (feature, class output); only the 2048-d feature is used here.
        temp, _ = model(inputs)
        temp = temp.detach().cpu().numpy().tolist()
        for idx in range(len(temp)):
            temp_dict[paths[idx].split('/')[-1]] = temp[idx]
    with open(train_json) as f:
        submit = json.load(f)
    submit_annos = submit['annotations']
    for i in range(len(submit_annos)):
        submit_anno = submit_annos[i]
        imgId = submit_anno['id']
        key_frame = submit_anno['key_frame']
        all_res = []
        for name in submit_anno['frames']:
            name_pre = temp_dict[imgId + '_' + name['frame_name']]
            all_res.append(name_pre)
        all_dict[imgId] = np.array(all_res).mean(axis=0)
        save_dict[imgId] = temp_dict[imgId + '_' + key_frame]

    all_df = pd.DataFrame.from_dict(
        all_dict,
        orient='index',
        columns=['F{}'.format(i) for i in range(2048)])
    all_df = all_df.reset_index().rename(columns={'index': 'map_id'})
    all_df.to_csv('to_dw.csv', index=False)
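
The loop above assumes the model's forward pass returns a (feature, output) pair with a 2048-dimensional feature, matching the F0–F2047 columns. Below is a hedged sketch of such a wrapper around a ResNet-50 backbone; the wrapper is an illustration of the expected interface, not the project's actual model, and num_classes=3 follows the three traffic statuses used elsewhere in these examples:

import torch.nn as nn
from torchvision import models

class FeatureAndOutputNet(nn.Module):
    """Returns (2048-d pooled feature, class scores), the shape of the
    (temp, output) pair unpacked in the example above."""

    def __init__(self, num_classes=3):
        super().__init__()
        backbone = models.resnet50(pretrained=False)
        self.features = nn.Sequential(*list(backbone.children())[:-1])  # conv stem ... global avg pool
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        feat = self.features(x).flatten(1)  # (N, 2048)
        return feat, self.fc(feat)
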
Example #4
# Uses the same module-level imports and project constants as Example #1.
def test_model(model):
    """Concatenate each frame's feature vector and class outputs, prefix the row
    with the frame file name, and dump all rows to a single CSV."""
    model.eval()
    temp_list = []
    sun_set = roadDatasetInfer(TRAIN_DIR)
    data_loaders = DataLoader(sun_set,
                              batch_size=TRAIN_BATCH_SIZE,
                              shuffle=False,
                              num_workers=4)
    for data in data_loaders:
        inputs, paths = data
        inputs = inputs.cuda()
        # The model returns (feature, class output); keep both.
        temp, output = model(inputs)
        temp = temp.detach().cpu().numpy().tolist()
        output = output.detach().cpu().numpy().tolist()
        for idx in range(len(temp)):
            # Row layout: [frame file name, feature values..., class outputs...].
            temp[idx] += output[idx]
            temp[idx].insert(0, paths[idx].split('/')[-1])
        temp_list += temp
    pd.DataFrame(temp_list).to_csv('feature/train_feature_merge.csv',
                                   index=False,
                                   header=None)
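
A small read-back sketch for the merged CSV; the file is written without a header, so the columns are positional (frame file name first, then the feature values and class outputs):

import pandas as pd

merged = pd.read_csv('feature/train_feature_merge.csv', header=None)
frame_names = merged[0]                 # first column: frame file name
values = merged.iloc[:, 1:].to_numpy()  # remaining columns: feature vector + class outputs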