Code example #1
File: cluster_eval.py Project: ahangchen/TrackViz
def main():
    opt = arg_parse()
    cluster_tracks = pickle_load(opt.track_path)
    score = purity_score_for_track(cluster_tracks)
    print('score %f' % score)
    write(
        opt.result_path, 'cluster task: %s \t\tscore: %f\n' %
        (opt.track_path.split('/')[-1], score))
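
A note on what purity_score_for_track likely computes: standard cluster purity is the fraction of samples that carry the majority label of their cluster. A minimal sketch under that assumption (the data layout below is hypothetical, not TrackViz's actual structure):

from collections import Counter

def purity_sketch(cluster_tracks):
    # cluster_tracks: {cluster_id: [ground-truth person_id, ...]} -- hypothetical layout
    total = sum(len(pids) for pids in cluster_tracks.values())
    # each cluster contributes the count of its most frequent ground-truth id
    majority = sum(Counter(pids).most_common(1)[0][1]
                   for pids in cluster_tracks.values() if pids)
    return float(majority) / total

print(purity_sketch({0: [1, 1, 1], 1: [2, 2, 1]}))  # 5 / 6 ~= 0.83
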
Code example #2
def random_hp():
    image_size = 28
    num_labels = 10
    train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = \
        format_mnist()
    pick_size = 2048
    valid_dataset = valid_dataset[0:pick_size, :, :, :]
    valid_labels = valid_labels[0:pick_size, :]
    test_dataset = test_dataset[0:pick_size, :, :, :]
    test_labels = test_labels[0:pick_size, :]
    basic_hypers = {
        'batch_size': 10,
        'patch_size': 10,
        'depth': 10,
        'num_hidden': 10,
        'layer_sum': 3,
        'starter_learning_rate': 0.1
    }
    if basic_hypers['patch_size'] > 28:
        basic_hypers['patch_size'] = 28

    print('=' * 80)
    print(basic_hypers)

    for _ in range(200):
        random_hypers = {
            'batch_size':
            basic_hypers['batch_size'] + random.randint(-9, 30),
            'patch_size':
            basic_hypers['patch_size'] + random.randint(-5, 15),
            'depth':
            basic_hypers['depth'] + random.randint(-9, 10),
            'num_hidden':
            basic_hypers['num_hidden'] * random.randint(1, 10),
            'layer_sum':
            basic_hypers['layer_sum'] + random.randint(-1, 2),
            'starter_learning_rate':
            basic_hypers['starter_learning_rate'] * random.uniform(0, 5)
        }
        # if basic_hypers['batch_size'] < 10:
        #     basic_hypers['batch_size'] = 10
        if random_hypers['patch_size'] > 28:
            random_hypers['patch_size'] = 28
        print('=' * 80)
        print(random_hypers)
        stride_params = [[1, 2, 2, 1]
                         for _ in range(random_hypers['layer_sum'])]
        hps, loss_es = conv_train(train_dataset, train_labels, valid_dataset,
                                  valid_labels, test_dataset, test_labels,
                                  image_size, num_labels, random_hypers,
                                  stride_params)
        file_helper.write('hps', str(hps))
        file_helper.write('losses', str(loss_es))
Code example #3
def write_with_confidence(confidence_bound):
    cross_score_path = 'data/top10-test/cross_filter_score.log'
    cross_pid_path = 'data/top10-test/cross_filter_pid.log'

    filter_pid_path = 'data/top10/conf_filter_pid.log'
    filter_score_path = 'data/top10/conf_filter_score.log'

    safe_remove(filter_pid_path)
    safe_remove(filter_score_path)
    score_lines = read_lines(cross_score_path)
    persons_scores = [[float(score) for score in score_line.split()] for score_line in score_lines]
    max_score = max([max(predict_scores) for predict_scores in persons_scores])

    pid_lines = read_lines(cross_pid_path)


    write_line_cnt = 0
    for i in range(len(persons_scores)):
        scores = persons_scores[i]
        pids = pid_lines[i].split()
        has_write = False
        for j in range(len(persons_scores[0])):
            confidence = max(scores[j]/max_score, 1 - scores[j]/max_score)
            if confidence < confidence_bound:
                # print(confidence)
                if not has_write:
                    write_line_cnt += 1
                has_write = True
                write(filter_pid_path, pids[j] + ' ')
                write(filter_score_path, '%f ' % scores[j])
        write(filter_pid_path, '\n')
        write(filter_score_path, '\n')
    return write_line_cnt
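
The confidence measure above, max(s / max_score, 1 - s / max_score), is always at least 0.5 and approaches 1.0 as the normalized score nears either extreme, so the filter keeps only the ambiguous middle of the range. A small standalone sketch of the same idea (names are illustrative only):

def confidence(score, max_score):
    # max(p, 1 - p) with p = score / max_score: 0.5 for ambiguous scores, ~1.0 near either extreme
    p = score / max_score
    return max(p, 1 - p)

# only scores near the middle of the range fall below a bound such as 0.6
for s in (0.05, 0.5, 0.95):
    print(s, confidence(s, 1.0), confidence(s, 1.0) < 0.6)
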
Code example #4
def avg_acc2(grid_eval_path):
    grid_infos = read_lines(grid_eval_path)
    before_vision_accs = [0.0, 0.0, 0.0, 0.0]
    before_fusion_accs = [0.0, 0.0, 0.0]
    after_vision_accs = [0.0, 0.0, 0.0]
    after_fusion_accs = [0.0, 0.0, 0.0]
    i_cv_cnt = 0
    for i, grid_info in enumerate(grid_infos):
        if i % 2 != 0:
            accs = grid_info.split()
            for j in range(4):
                before_vision_accs[j] += float(accs[j])

            i_cv_cnt += 1
    write('grid_eval.log', '\n' + grid_eval_path + '\n')
    write(
        'grid_eval.log', '& %.2f & %.2f & %.2f & %.2f\n' %
        (before_vision_accs[0] * 10, before_vision_accs[1] * 10,
         before_vision_accs[2] * 10, before_vision_accs[3] * 10))
Code example #5
def write_unequal_rand_st_model(fusion_param):
    # fusion_param = get_fusion_param()
    rand_answer_path = fusion_param['answer_path'].replace(ctrl_msg['data_folder_path'],
                                                           ctrl_msg['data_folder_path'] + '_uerand')
    rand_folder_path = folder(rand_answer_path)
    safe_mkdir(rand_folder_path)
    # copy everything, including the pid info, even though it is not used in later training
    shutil.copy(fusion_param['answer_path'], rand_answer_path)
    rand_path = rand_folder_path + '/renew_pid.log'
    safe_remove(rand_path)

    origin_tracks = get_tracks(fusion_param)
    pid_cnt = len(origin_tracks)
    origin_pids = list(range(1, pid_cnt + 1))  # pids are 1-based
    persons_rand_predict_idx_s = [random.sample(origin_pids, pid_cnt) for _ in range(pid_cnt)]

    viz_pid_path = fusion_param['renew_pid_path']
    viz_score_path = fusion_param['renew_ac_path']
    viz_pids = read_lines(viz_pid_path)
    viz_pids = [per_viz_pids.split() for per_viz_pids in viz_pids]
    viz_scores = read_lines(viz_score_path)
    viz_scores = [per_viz_scores.split() for per_viz_scores in viz_scores]
    viz_same_pids = [
        [
            int(viz_pid) for viz_pid, viz_score in zip(per_viz_pids, per_viz_scores) if float(viz_score) > 0.7
        ] for per_viz_scores, per_viz_pids in zip(viz_scores, viz_pids)
    ]

    persons_unequal_rand_predict_idx_s = list()
    for i in range(pid_cnt):
        diff_persons = list(set(persons_rand_predict_idx_s[i]) ^ set(viz_same_pids[i]))
        diff_cnt = len(diff_persons)
        persons_unequal_rand_predict_idx_s.append(
            random.sample(diff_persons, diff_cnt)
        )

    write_content = ''
    for rand_predict_idx_s in persons_unequal_rand_predict_idx_s:
        for rand_predict_idx in rand_predict_idx_s:
            write_content += str(rand_predict_idx) + ' '
        write_content += '\n'
    write(rand_path, write_content)
Code example #6
File: split_duke.py Project: ahangchen/TrackViz
def duke_tracks_realloc(tracks, track_name, data_type):
    target_list_path = '../data/%s/%s.list' % (track_name, data_type)
    target_track_info_dir_path = '/home/cwh/coding/TrackViz/data/%s' % track_name
    safe_mkdir(target_track_info_dir_path)
    target_track_dir_path = '/home/cwh/coding/%s' % track_name
    safe_mkdir(target_track_dir_path)
    target_track_type_dir_path = '/home/cwh/coding/%s/%s' % (track_name,
                                                             data_type)
    safe_mkdir(target_track_type_dir_path)
    names = list()
    for track in tracks:
        img_name = '%04d_c%d_f%07d.jpg' % (int(track[0]), track[1], track[2])
        shutil.copyfile(
            '/home/cwh/coding/DukeMTMC-reID/%s/%s' % (data_type, img_name),
            '%s/%s' % (target_track_type_dir_path, img_name))
        names.append(img_name)
        names.append('\n')
    list_stmt = ''.join(names)
    safe_remove(target_list_path)
    write(target_list_path, list_stmt)
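
For reference, the '%04d_c%d_f%07d.jpg' pattern above follows the DukeMTMC-reID image naming convention (zero-padded person id, camera id, frame index); a quick formatting check with made-up values:

# track = (person_id, camera_id, frame_index), as consumed by duke_tracks_realloc
track = ('0005', 2, 13370)  # made-up values
print('%04d_c%d_f%07d.jpg' % (int(track[0]), track[1], track[2]))  # -> 0005_c2_f0013370.jpg
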
Code example #7
def avg_acc(grid_eval_path):
    grid_infos = read_lines(grid_eval_path)
    before_vision_accs = [0.0, 0.0, 0.0]
    before_fusion_accs = [0.0, 0.0, 0.0]
    after_vision_accs = [0.0, 0.0, 0.0]
    after_fusion_accs = [0.0, 0.0, 0.0]
    i_cv_cnt = 0
    for i, grid_info in enumerate(grid_infos):
        if i % 2 != 0:
            accs = grid_info.split()
            if i_cv_cnt % 4 == 0:
                for j in range(3):
                    before_vision_accs[j] += float(accs[j])
            if i_cv_cnt % 4 == 1:
                for j in range(3):
                    before_fusion_accs[j] += float(accs[j])
            if i_cv_cnt % 4 == 2:
                for j in range(3):
                    after_vision_accs[j] += float(accs[j])
            if i_cv_cnt % 4 == 3:
                for j in range(3):
                    after_fusion_accs[j] += float(accs[j])
            i_cv_cnt += 1
    write('grid_eval.log', '\n' + grid_eval_path + '\n')
    write(
        'grid_eval.log', 'before_retrain_vision\n%f %f %f\n' %
        (before_vision_accs[0] / 10, before_vision_accs[1] / 10,
         before_vision_accs[2] / 10))
    write(
        'grid_eval.log', 'before_retrain_fusion\n%f %f %f\n' %
        (before_fusion_accs[0] / 10, before_fusion_accs[1] / 10,
         before_fusion_accs[2] / 10))
    write(
        'grid_eval.log', 'after_retrain_vision\n%f %f %f\n' %
        (after_vision_accs[0] / 10, after_vision_accs[1] / 10,
         after_vision_accs[2] / 10))
    write(
        'grid_eval.log', 'after_retrain_fusion\n%f %f %f\n' %
        (after_fusion_accs[0] / 10, after_fusion_accs[1] / 10,
         after_fusion_accs[2] / 10))
Code example #8
def write_rand_pid(fusion_param):
    # For each left (probe) image, randomly generate the pids of 250 right (gallery) images,
    # which amounts to a random renew_pid.log; rand_path is deleted on every run, so nothing is cached
    # TODO: generating 250 right images may not be necessary
    # fusion_param = get_fusion_param()
    rand_answer_path = fusion_param['answer_path'].replace(ctrl_msg['data_folder_path'], ctrl_msg['data_folder_path'] + '_rand')
    rand_folder_path = folder(rand_answer_path)
    safe_mkdir(rand_folder_path)
    # copy everything, including the pid info, even though it is not used in later training
    shutil.copy(fusion_param['answer_path'], rand_answer_path)
    rand_path = rand_folder_path + '/renew_pid.log'
    safe_remove(rand_path)

    origin_tracks = get_tracks(fusion_param)
    pid_cnt = len(origin_tracks)
    origin_pids = list(range(1, pid_cnt + 1))  # pids are 1-based
    persons_rand_predict_idx_s = [random.sample(origin_pids, pid_cnt) for _ in range(pid_cnt)]
    write_content = ''
    for rand_predict_idx_s in persons_rand_predict_idx_s:
        for rand_predict_idx in rand_predict_idx_s:
            write_content += str(rand_predict_idx) + ' '
        write_content += '\n'
    write(rand_path, write_content)
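
To make the output concrete: each line of the generated renew_pid.log is an independent random permutation of all pids for one probe. A minimal sketch with a toy pid_cnt (same logic as above):

import random

pid_cnt = 5  # toy value; in the real data this is the number of tracks
origin_pids = list(range(1, pid_cnt + 1))
lines = [' '.join(str(pid) for pid in random.sample(origin_pids, pid_cnt))
         for _ in range(pid_cnt)]
print('\n'.join(lines))  # e.g. "3 1 5 2 4" on each line, one permutation per probe
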
Code example #9
File: st_filter.py Project: ahangchen/TrackViz
def simple_fusion_st_img_ranker(fusion_param):
    ep = fusion_param['ep']
    en = fusion_param['en']
    # get the predicted person ids and image (appearance) scores from renew_pid and renew_ac
    persons_ap_scores = predict_img_scores(fusion_param)
    persons_ap_pids = predict_pids(fusion_param)
    # load the previously built spatio-temporal model (and the random one) from disk
    camera_delta_s = pickle_load(fusion_param['distribution_pickle_path'])

    real_tracks = train_tracks(fusion_param)
    # compute the spatio-temporal scores and the random spatio-temporal scores
    persons_track_scores = predict_track_scores(real_tracks, camera_delta_s, fusion_param)

    persons_cross_scores = list()
    log_path = fusion_param['eval_fusion_path']
    map_score_path = fusion_param['fusion_normal_score_path']
    safe_remove(map_score_path)
    safe_remove(log_path)
    line_log_cnt = 10

    for i, person_ap_pids in enumerate(persons_ap_pids):
        cross_scores = list()
        for j, person_ap_pid in enumerate(person_ap_pids):
            cross_score = persons_track_scores[i][j] * persons_ap_scores[i][j]
            cross_scores.append(cross_score)
        persons_cross_scores.append(cross_scores)
    print('img score ready')
    max_score = max([max(predict_cross_scores) for predict_cross_scores in persons_cross_scores])

    for i, person_cross_scores in enumerate(persons_cross_scores):
        for j, person_cross_score in enumerate(person_cross_scores):
            if person_cross_score > 0:
                # diff seq not sort and not normalize
                persons_cross_scores[i][j] /= max_score
            else:
                persons_cross_scores[i][j] *= -1.
            # if real_tracks[i][1] == real_tracks[persons_ap_pids[i][j]][1]:
            #     # print '%d, %d' % (i, j)
            #     persons_cross_scores[i][j] = 0
    person_score_idx_s = list()
    top1_scores = list()
    print('above person score ready')
    for i, person_cross_scores in enumerate(persons_cross_scores):
        # sort a single probe's predictions by score to get indices, used to reorder the pids
        sort_score_idx_s = sorted(range(len(person_cross_scores)), key=lambda k: -person_cross_scores[k])
        person_score_idx_s.append(sort_score_idx_s)
        # collect the top-1 score distribution, used later to compute the median
        top1_scores.append(person_cross_scores[sort_score_idx_s[0]])
    # sort in descending order and take the score at the midpoint (the median)
    sorted_top1_scores = sorted(top1_scores, reverse=True)
    mid_score = sorted_top1_scores[int(len(sorted_top1_scores) * 0.5)]
    mid_score_path = fusion_param['mid_score_path']
    safe_remove(mid_score_path)
    write(mid_score_path, '%f\n' % mid_score)
    print(str(mid_score))
    sorted_persons_ap_pids = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
    sorted_persons_ap_scores = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
    for i, person_ap_pids in enumerate(persons_ap_pids):
        for j in range(len(person_ap_pids)):
            sorted_persons_ap_pids[i][j] = persons_ap_pids[i][person_score_idx_s[i][j]]
            sorted_persons_ap_scores[i][j] = persons_cross_scores[i][person_score_idx_s[i][j]]
    np.savetxt(log_path, sorted_persons_ap_pids, fmt='%d')
    np.savetxt(map_score_path, sorted_persons_ap_scores, fmt='%f')
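
The per-probe ranking above, sorted(range(len(scores)), key=lambda k: -scores[k]), yields the indices of the scores in descending order; numpy's argsort gives the same indices. A small sketch with made-up scores:

import numpy as np

person_cross_scores = [0.2, 0.9, 0.1, 0.5]          # made-up fused scores for one probe
sort_idx = sorted(range(len(person_cross_scores)),  # idiom used in the code above
                  key=lambda k: -person_cross_scores[k])
print(sort_idx)                                      # [1, 3, 0, 2]
print(np.argsort(-np.asarray(person_cross_scores)))  # same order: [1 3 0 2]
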
Code example #10
File: st_filter.py Project: ahangchen/TrackViz
def fusion_st_img_ranker(fusion_param):
    ep = fusion_param['ep']
    en = fusion_param['en']
    # get the predicted person ids and image (appearance) scores from renew_pid and renew_ac
    persons_ap_scores = predict_img_scores(fusion_param)
    persons_ap_pids = predict_pids(fusion_param)
    # load the previously built spatio-temporal model, and the random spatio-temporal model, from disk
    camera_delta_s = pickle_load(fusion_param['distribution_pickle_path'])
    rand_delta_s = pickle_load(fusion_param['rand_distribution_pickle_path'])
    # diff_delta_s = pickle_load(fusion_param['rand_distribution_pickle_path'].replace('rand', 'diff'))

    real_tracks = train_tracks(fusion_param)
    # compute the spatio-temporal scores and the random spatio-temporal scores
    persons_track_scores = predict_track_scores(real_tracks, camera_delta_s, fusion_param)
    rand_track_scores = predict_track_scores(real_tracks, rand_delta_s, fusion_param)
    diff_track_scores = rand_track_scores  # predict_track_scores(real_tracks, diff_delta_s, fusion_param)

    persons_cross_scores = list()
    log_path = fusion_param['eval_fusion_path']
    map_score_path = fusion_param['fusion_normal_score_path']
    safe_remove(map_score_path)
    safe_remove(log_path)
    line_log_cnt = 10

    for i, person_ap_scores in enumerate(persons_ap_scores):
        cur_max_vision = max(person_ap_scores)
        cur_min_vision = min(person_ap_scores)
        persons_ap_scores[i] = (persons_ap_scores[i] - cur_min_vision) / (cur_max_vision - cur_min_vision)
        persons_ap_scores[i] = np.exp(persons_ap_scores[i] * 3)
        cur_max_vision = max(persons_ap_scores[i])
        cur_min_vision = min(persons_ap_scores[i])
        persons_ap_scores[i] = (persons_ap_scores[i] - cur_min_vision) / (cur_max_vision - cur_min_vision)

    for i, person_ap_pids in enumerate(persons_ap_pids):
        cross_scores = list()
        for j, person_ap_pid in enumerate(person_ap_pids):
            if rand_track_scores[i][j] < 0.00002:
                cross_score = (persons_track_scores[i][j]*(1-ep) - en*diff_track_scores[i][j]) * (persons_ap_scores[i][j]+ep/(1-ep-en)) / 0.00002
            else:
                cross_score = (persons_track_scores[i][j] * (1 - ep) - en * diff_track_scores[i][j]) * (
                persons_ap_scores[i][j] + ep / (1 - ep - en)) / rand_track_scores[i][j]

            cross_scores.append(cross_score)
        persons_cross_scores.append(cross_scores)
    print('img score ready')
    max_score = max([max(predict_cross_scores) for predict_cross_scores in persons_cross_scores])
    print('max_cross_score %f' % max_score)

    for i, person_cross_scores in enumerate(persons_cross_scores):
        for j, person_cross_score in enumerate(person_cross_scores):
            if person_cross_score > 0: # diff camera
                # diff seq not sort and not normalize
                persons_cross_scores[i][j] /= max_score
            else:  # same camera, or (diff camera && rand score == -1)
                persons_cross_scores[i][j] = persons_ap_scores[i][j]
            # if real_tracks[i][1] == real_tracks[persons_ap_pids[i][j]][1]:
            #     # print '%d, %d' % (i, j)
            #     persons_cross_scores[i][j] = 0
            if i == 0 and j % 100 == 0:
                print('st:%f st_diff%f vis:%f fus:%f' % (
                    persons_track_scores[i][j], diff_track_scores[i][j], persons_ap_scores[i][j], persons_cross_scores[i][j]))

    person_score_idx_s = list()
    top1_scores = list()
    print('above person score ready')
    for i, person_cross_scores in enumerate(persons_cross_scores):
        # sort a single probe's predictions by score to get indices, used to reorder the pids
        sort_score_idx_s = sorted(range(len(person_cross_scores)), key=lambda k: -person_cross_scores[k])
        person_score_idx_s.append(sort_score_idx_s)
        # collect the top-1 score distribution, used later to compute the median
        top1_scores.append(person_cross_scores[sort_score_idx_s[0]])
    # sort in descending order and take the score at the midpoint (the median)
    sorted_top1_scores = sorted(top1_scores, reverse=True)
    mid_score = sorted_top1_scores[int(len(sorted_top1_scores) * 0.5)]
    mid_score_path = fusion_param['mid_score_path']
    safe_remove(mid_score_path)
    write(mid_score_path, '%f\n' % mid_score)
    print(str(mid_score))
    sorted_persons_ap_pids = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
    sorted_persons_ap_scores = np.zeros([len(persons_ap_pids), len(persons_ap_pids[0])])
    for i, person_ap_pids in enumerate(persons_ap_pids):
        for j in range(len(person_ap_pids)):
            sorted_persons_ap_pids[i][j] = persons_ap_pids[i][person_score_idx_s[i][j]]
            sorted_persons_ap_scores[i][j] = persons_cross_scores[i][person_score_idx_s[i][j]]
    np.savetxt(log_path, sorted_persons_ap_pids, fmt='%d')
    np.savetxt(map_score_path, sorted_persons_ap_scores, fmt='%f')
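
The appearance-score preprocessing in fusion_st_img_ranker (min-max normalize, stretch with exp(3x), then min-max normalize again) compresses the low end and spreads out the top scores. A standalone sketch of that transform on a made-up score row:

import numpy as np

def stretch_scores(ap_scores):
    # same three steps applied per probe row in fusion_st_img_ranker above
    ap_scores = np.asarray(ap_scores, dtype=float)
    ap_scores = (ap_scores - ap_scores.min()) / (ap_scores.max() - ap_scores.min())
    ap_scores = np.exp(ap_scores * 3)
    return (ap_scores - ap_scores.min()) / (ap_scores.max() - ap_scores.min())

print(stretch_scores([0.1, 0.4, 0.7, 1.0]))  # low scores bunch near 0, the top score stays at 1
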
Code example #11
File: cnn_prophet.py Project: ziyubiti/GDLnotes
def conv_train(train_dataset,
               train_labels,
               valid_dataset,
               valid_labels,
               test_dataset,
               test_labels,
               image_size,
               num_labels,
               basic_hps,
               stride_ps,
               lrd=False):
    batch_size = basic_hps['batch_size']
    patch_size = basic_hps['patch_size']
    depth = basic_hps['depth']
    num_hidden = basic_hps['num_hidden']
    num_channels = 1
    layer_cnt = basic_hps['layer_sum']
    loss_collect = list()
    first_hidden_num = basic_hps['num_hidden']
    second_hidden_num = first_hidden_num / 2 + 1

    graph = tf.Graph()
    with graph.as_default():
        # Input data.
        tf_train_dataset = tf.placeholder(tf.float32,
                                          shape=(batch_size, image_size,
                                                 image_size, num_channels))
        tf_train_labels = tf.placeholder(tf.float32,
                                         shape=(batch_size, num_labels))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)

        input_weights = tf.Variable(
            tf.truncated_normal([patch_size, patch_size, num_channels, depth],
                                stddev=0.1))
        input_biases = tf.Variable(tf.zeros([depth]))
        mid_layer_cnt = layer_cnt - 1
        layer_weights = list()
        layer_biases = [
            tf.Variable(tf.constant(1.0, shape=[depth / (i + 2)]))
            for i in range(mid_layer_cnt)
        ]
        output_weights = list()
        output_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
        first_nn_weights = tf.Variable(
            tf.truncated_normal([first_hidden_num, second_hidden_num],
                                stddev=0.1))
        second_nn_weights = tf.Variable(
            tf.truncated_normal([second_hidden_num, num_labels], stddev=0.1))
        first_nn_biases = tf.Variable(
            tf.constant(1.0, shape=[second_hidden_num]))
        second_nn_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

        # Model.
        def model(data, init=False):
            # Variables.
            if not large_data_size(data) or not large_data_size(input_weights):
                stride_ps[0] = [1, 1, 1, 1]
            conv = tf.nn.conv2d(data,
                                input_weights,
                                stride_ps[0],
                                use_cudnn_on_gpu=True,
                                padding='SAME')
            conv = maxpool2d(conv)
            hidden = tf.nn.relu(conv + input_biases)
            if init:
                hidden = tf.nn.dropout(hidden, 0.8)
            for i in range(mid_layer_cnt):
                # print(hidden)
                if init:
                    # avoid filter shape larger than input shape
                    hid_shape = hidden.get_shape()
                    # print(hid_shape)
                    filter_w = patch_size / (i + 1)
                    filter_h = patch_size / (i + 1)
                    # print(filter_w)
                    # print(filter_h)
                    if filter_w > hid_shape[1]:
                        filter_w = int(hid_shape[1])
                    if filter_h > hid_shape[2]:
                        filter_h = int(hid_shape[2])
                    layer_weight = tf.Variable(
                        tf.truncated_normal(shape=[
                            filter_w, filter_h, depth / (i + 1),
                            depth / (i + 2)
                        ],
                                            stddev=0.1))
                    layer_weights.append(layer_weight)
                if not large_data_size(hidden) or not large_data_size(
                        layer_weights[i]):
                    # print("is not large data")
                    stride_ps[i + 1] = [1, 1, 1, 1]
                # print(stride_ps[i + 1])
                # print(len(stride_ps))
                # print(i + 1)
                conv = tf.nn.conv2d(hidden,
                                    layer_weights[i],
                                    stride_ps[i + 1],
                                    use_cudnn_on_gpu=True,
                                    padding='SAME')
                if not large_data_size(conv):
                    conv = maxpool2d(conv, 1, 1)
                else:
                    conv = maxpool2d(conv)
                hidden = tf.nn.relu(conv + layer_biases[i])
                if init:
                    hidden = tf.nn.dropout(hidden, 0.8)

            shapes = hidden.get_shape().as_list()
            shape_mul = 1
            for s in shapes[1:]:
                shape_mul *= s

            if init:
                output_size = shape_mul
                output_weights.append(
                    tf.Variable(
                        tf.truncated_normal([output_size, num_hidden],
                                            stddev=0.1)))
            reshape = tf.reshape(hidden, [shapes[0], shape_mul])

            hidden = tf.nn.relu6(
                tf.matmul(reshape, output_weights[0]) + output_biases)
            if init:
                hidden = tf.nn.dropout(hidden, 0.5)
            hidden = tf.matmul(hidden, first_nn_weights) + first_nn_biases
            if init:
                hidden = tf.nn.dropout(hidden, 0.5)
            hidden = tf.matmul(hidden, second_nn_weights) + second_nn_biases
            return hidden

        # Training computation.
        logits = model(tf_train_dataset, init=True)
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
        # Optimizer.
        starter_learning_rate = 0.1
        if lrd:
            cur_step = tf.Variable(0)  # count the number of steps taken.
            learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                                       cur_step,
                                                       10000,
                                                       0.96,
                                                       staircase=True)
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate).minimize(loss, global_step=cur_step)
        else:
            optimizer = tf.train.AdagradOptimizer(
                starter_learning_rate).minimize(loss)

        # Predictions for the training, validation, and test data.
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
        test_prediction = tf.nn.softmax(model(tf_test_dataset))
    num_steps = 3001
    start_fit = 600
    init_loss = []

    with tf.Session(graph=graph) as session:
        tf.initialize_all_variables().run()
        print('Initialized')
        end_train = False
        mean_loss = 0
        for step in range(num_steps):
            if end_train:
                break
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {
                tf_train_dataset: batch_data,
                tf_train_labels: batch_labels
            }
            _, l, predictions = session.run(
                [optimizer, loss, train_prediction], feed_dict=feed_dict)
            mean_loss += l
            if step % 5 == 0:
                mean_loss /= 5.0
                loss_collect.append(mean_loss)
                mean_loss = 0
                if step >= start_fit:
                    # print(loss_collect)
                    if step == start_fit:
                        res = fit_more(1, [
                            batch_size, depth, num_hidden, layer_cnt,
                            patch_size
                        ], loss_collect)
                    else:
                        res = fit_more(0, [
                            batch_size, depth, num_hidden, layer_cnt,
                            patch_size
                        ], loss_collect)
                    loss_collect.remove(loss_collect[0])
                    ret = res['ret']
                    if ret == 1:
                        print('ret is end train when step is {step}'.format(
                            step=step))
                        init_loss.append(loss_collect)
                        more_index = predict_future([
                            batch_size, depth, num_hidden, layer_cnt,
                            patch_size
                        ], init_loss[0])
                        print('more index: %d' % more_index)
                        for i in range(more_index):
                            offset = ((step + i + 1) * batch_size) % (
                                train_labels.shape[0] - batch_size)
                            batch_data = train_dataset[offset:(
                                offset + batch_size), :, :, :]
                            batch_labels = train_labels[offset:(offset +
                                                                batch_size), :]
                            feed_dict = {
                                tf_train_dataset: batch_data,
                                tf_train_labels: batch_labels
                            }
                            _, l, predictions = session.run(
                                [optimizer, loss, train_prediction],
                                feed_dict=feed_dict)
                            loss_collect.append(l)
                            file_helper.write(
                                '/home/cwh/coding/python/NN/line.txt',
                                str(loss_collect[20]))
                            loss_collect.remove(loss_collect[0])
                        for loss in loss_collect[21:]:
                            file_helper.write(
                                '/home/cwh/coding/python/NN/line.txt',
                                str(loss))
                        end_train = True

                        file_helper.write(
                            '/home/cwh/coding/python/NN/line.txt', '===')
                    if step % 50 == 0:
                        print('Minibatch loss at step %d: %f' % (step, l))
                        print('Validation accuracy: %.1f%%' %
                              accuracy(valid_prediction.eval(), valid_labels))

        print('Test accuracy: %.1f%%' %
              accuracy(test_prediction.eval(), test_labels))
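
conv_train relies on several helpers that are not shown here (maxpool2d, accuracy, large_data_size, fit_more, predict_future, file_helper). For orientation only, plausible sketches of the two most conventional ones, written against the same TF 1.x-era API as the code above; these are assumptions, not the project's actual implementations:

import numpy as np
import tensorflow as tf

def maxpool2d(x, k=2, s=2):
    # assumed: standard max pooling with SAME padding; conv_train calls it as maxpool2d(conv) and maxpool2d(conv, 1, 1)
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME')

def accuracy(predictions, labels):
    # assumed: percentage of samples whose argmax prediction matches the one-hot label
    return 100.0 * np.mean(np.argmax(predictions, 1) == np.argmax(labels, 1))
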
Code example #12
def save_fusion_param():
    write('data/fusion_param.json', str(fusion_param))
Code example #13
File: cnn_prophet.py Project: ahangchen/GDLnotes
def conv_train(train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels, image_size,
               num_labels, basic_hps, stride_ps, lrd=False):
    batch_size = basic_hps['batch_size']
    patch_size = basic_hps['patch_size']
    depth = basic_hps['depth']
    num_hidden = basic_hps['num_hidden']
    num_channels = 1
    layer_cnt = basic_hps['layer_sum']
    loss_collect = list()
    first_hidden_num = basic_hps['num_hidden']
    second_hidden_num = first_hidden_num / 2 + 1

    graph = tf.Graph()
    with graph.as_default():
        # Input data.
        tf_train_dataset = tf.placeholder(
            tf.float32, shape=(batch_size, image_size, image_size, num_channels))
        tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)

        input_weights = tf.Variable(tf.truncated_normal(
            [patch_size, patch_size, num_channels, depth], stddev=0.1))
        input_biases = tf.Variable(tf.zeros([depth]))
        mid_layer_cnt = layer_cnt - 1
        layer_weights = list()
        layer_biases = [tf.Variable(tf.constant(1.0, shape=[depth / (i + 2)])) for i in range(mid_layer_cnt)]
        output_weights = list()
        output_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
        first_nn_weights = tf.Variable(tf.truncated_normal(
            [first_hidden_num, second_hidden_num], stddev=0.1))
        second_nn_weights = tf.Variable(tf.truncated_normal(
            [second_hidden_num, num_labels], stddev=0.1))
        first_nn_biases = tf.Variable(tf.constant(1.0, shape=[second_hidden_num]))
        second_nn_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

        # Model.
        def model(data, init=False):
            # Variables.
            if not large_data_size(data) or not large_data_size(input_weights):
                stride_ps[0] = [1, 1, 1, 1]
            conv = tf.nn.conv2d(data, input_weights, stride_ps[0], use_cudnn_on_gpu=True, padding='SAME')
            conv = maxpool2d(conv)
            hidden = tf.nn.relu(conv + input_biases)
            if init:
                hidden = tf.nn.dropout(hidden, 0.8)
            for i in range(mid_layer_cnt):
                # print(hidden)
                if init:
                    # avoid filter shape larger than input shape
                    hid_shape = hidden.get_shape()
                    # print(hid_shape)
                    filter_w = patch_size / (i + 1)
                    filter_h = patch_size / (i + 1)
                    # print(filter_w)
                    # print(filter_h)
                    if filter_w > hid_shape[1]:
                        filter_w = int(hid_shape[1])
                    if filter_h > hid_shape[2]:
                        filter_h = int(hid_shape[2])
                    layer_weight = tf.Variable(tf.truncated_normal(shape=[filter_w, filter_h, depth / (i + 1), depth / (i + 2)],
                                                                   stddev=0.1))
                    layer_weights.append(layer_weight)
                if not large_data_size(hidden) or not large_data_size(layer_weights[i]):
                    # print("is not large data")
                    stride_ps[i + 1] = [1, 1, 1, 1]
                # print(stride_ps[i + 1])
                # print(len(stride_ps))
                # print(i + 1)
                conv = tf.nn.conv2d(hidden, layer_weights[i], stride_ps[i + 1], use_cudnn_on_gpu=True, padding='SAME')
                if not large_data_size(conv):
                    conv = maxpool2d(conv, 1, 1)
                else:
                    conv = maxpool2d(conv)
                hidden = tf.nn.relu(conv + layer_biases[i])
                if init:
                    hidden = tf.nn.dropout(hidden, 0.8)

            shapes = hidden.get_shape().as_list()
            shape_mul = 1
            for s in shapes[1:]:
                shape_mul *= s

            if init:
                output_size = shape_mul
                output_weights.append(tf.Variable(tf.truncated_normal([output_size, num_hidden], stddev=0.1)))
            reshape = tf.reshape(hidden, [shapes[0], shape_mul])

            hidden = tf.nn.relu6(tf.matmul(reshape, output_weights[0]) + output_biases)
            if init:
                hidden = tf.nn.dropout(hidden, 0.5)
            hidden = tf.matmul(hidden, first_nn_weights) + first_nn_biases
            if init:
                hidden = tf.nn.dropout(hidden, 0.5)
            hidden = tf.matmul(hidden, second_nn_weights) + second_nn_biases
            return hidden

        # Training computation.
        logits = model(tf_train_dataset, init=True)
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf_train_labels))
        # Optimizer.
        starter_learning_rate = 0.1
        if lrd:
            cur_step = tf.Variable(0)  # count the number of steps taken.
            learning_rate = tf.train.exponential_decay(starter_learning_rate, cur_step, 10000, 0.96, staircase=True)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=cur_step)
        else:
            optimizer = tf.train.AdagradOptimizer(starter_learning_rate).minimize(loss)

        # Predictions for the training, validation, and test data.
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
        test_prediction = tf.nn.softmax(model(tf_test_dataset))
    num_steps = 3001
    start_fit = 600
    init_loss = []

    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        print('Initialized')
        end_train = False
        mean_loss = 0
        for step in range(num_steps):
            if end_train:
                break
            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
            _, l, predictions = session.run(
                [optimizer, loss, train_prediction], feed_dict=feed_dict)
            mean_loss += l
            if step % 5 == 0:
                mean_loss /= 5.0
                loss_collect.append(mean_loss)
                mean_loss = 0
                if step >= start_fit:
                    # print(loss_collect)
                    if step == start_fit:
                        res = fit_more(1, [batch_size, depth, num_hidden, layer_cnt, patch_size], loss_collect)
                    else:
                        res = fit_more(0, [batch_size, depth, num_hidden, layer_cnt, patch_size], loss_collect)
                    loss_collect.remove(loss_collect[0])
                    ret = res['ret']
                    if ret == 1:
                        print('ret is end train when step is {step}'.format(step=step))
                        init_loss.append(loss_collect)
                        more_index = predict_future([batch_size, depth, num_hidden, layer_cnt, patch_size], init_loss[0])
                        print('more index: %d' % more_index)
                        for i in range(more_index):
                            offset = ((step + i + 1) * batch_size) % (train_labels.shape[0] - batch_size)
                            batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
                            batch_labels = train_labels[offset:(offset + batch_size), :]
                            feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
                            _, l, predictions = session.run(
                                [optimizer, loss, train_prediction], feed_dict=feed_dict)
                            loss_collect.append(l)
                            file_helper.write('/home/cwh/coding/python/NN/line.txt', str(loss_collect[20]))
                            loss_collect.remove(loss_collect[0])
                        for loss in loss_collect[21:]:
                            file_helper.write('/home/cwh/coding/python/NN/line.txt', str(loss))
                        end_train = True

                        file_helper.write('/home/cwh/coding/python/NN/line.txt', '===')
                    if step % 50 == 0:
                        print('Minibatch loss at step %d: %f' % (step, l))
                        print('Validation accuracy: %.1f%%' % accuracy(
                            valid_prediction.eval(), valid_labels))

        print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
Code example #14
from util.file_helper import read_lines, write
import seaborn as sns
import numpy as np

if __name__ == '__main__':
    # for i in range(10):
    for i in range(1):

        person_lines = read_lines('data/top-m2g-std%d-train/cross_filter_pid.log' % i)
        predict_persons = [predict_line.split() for predict_line in person_lines]
        score_lines = read_lines('data/top-m2g-std%d-train/cross_filter_score.log' % i)
        predict_scores = [score_line.split() for score_line in score_lines]
        write('data/top1.txt', 'std %d top1\n' % i)
        scores = list()
        pos_cnt = 0
        in_cnt = 0
        up_b = 1.
        down_b = 0.16
        for k in range(10):
            for j in range(len(predict_persons)):
                score = float(predict_scores[j][0])
                r = int(predict_persons[j][0]) - j - 1
                if abs(r) == 1:
                    write('data/top1.txt', 'left %d, right %d, score %f\n' % (j + 1, int(predict_persons[j][0]), score))
                scores.append(score)
                if down_b < score < up_b:
                    in_cnt += 1
                    if abs(r) == 1:
                        pos_cnt += 1
            if in_cnt == 0:
                in_cnt = 1