Example #1
0
def clean_up(is_match):
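    """
    Remove (and, in Overnight mode, optionally back up) the log, result, and
    sequence folders, depending on the connection type and whether the
    results matched.
    """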
    gp.print_log(gp.LogLevel.Normal, 'Cleaning Up Folders')
    anchor_result_dir = gp.result_dir[:-1] + '_anchor/'
    if gp.connection_type == gp.connection[1]:
        if is_match:
            gp.remove_dir(gp.cur_log_dir)
            gp.remove_dir(gp.ref_log_dir)
            gp.remove_dir(gp.result_dir)
            gp.remove_dir(anchor_result_dir)
            if gp.cur_platform != 'Linux':
                for seqIdx in gp.sequences:
                    gp.remove_dir(gp.sequence_dir + gp.sequences[seqIdx])
        else:
            gp.remove_dir(gp.result_dir)
            gp.remove_dir(anchor_result_dir)
    elif gp.connection_type == gp.connection[0]:
        if gp.mode == 'Overnight':
            if gp.save_to_backup_dir:
                gp.create_dir(gp.backup_log_dir)
                gp.zip_to_folder(gp.cur_log_dir[:-1] + '.zip', gp.cur_log_dir,
                                 gp.backup_log_dir)

                gp.create_dir(gp.temp_dir)
                zip_name = gp.cur_time + gp.folder_join + gp.cur_commit_id[
                    0:7] + '.zip'
                gp.zip_to_folder(zip_name, gp.result_dir, gp.temp_dir)

            gp.remove_dir(gp.cur_log_dir)
            gp.remove_dir(gp.ref_log_dir)
            gp.remove_dir(gp.result_dir)
        else:
            gp.remove_dir(anchor_result_dir)
            os_system('mv ' + gp.result_dir + ' ' + anchor_result_dir)
    gp.print_log(gp.LogLevel.Normal, 'Finish Cleaning Up Folders')
Example #2
0
def scale_yuv(*args):
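    """
    Prepare the source YUV for sequence args[0] and generate every candidate
    resolution/frame-rate variant, scaling through an intermediate
    aspect-ratio-matched copy when needed.
    """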
    seq_idx = args[0]
    gp.print_log(gp.LogLevel.Normal,
                 '\tScaling ' + gp.sequences[seq_idx] + '...')
    ori_sequence = gp.sequences[seq_idx] + '.yuv'
    sequence_dir = gp.generate_dir_path(gp.sequence_dir, gp.sequences[seq_idx])
    if not os.path.exists(sequence_dir):
        gp.create_dir(sequence_dir)
    if not os.path.exists(sequence_dir + ori_sequence):
        if gp.cur_platform == 'Linux':
            os_system('cp ' + gp.sequence_dir + gp.sequences[seq_idx] +
                      '.yuv ' + sequence_dir)
        else:
            os_system('unzip ' + gp.sequence_dir + gp.sequences[seq_idx] +
                      '.yuv.zip -d ' + sequence_dir)

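    # Sequence names are assumed to follow '<a>_<b>_<c>_<W>x<H>p<FPS>_<d>', so the
    # second-to-last '_' token carries the resolution and frame rate, e.g. a
    # hypothetical 'Seq_A_Comm_1280x720p30_8bit.yuv' parses to 1280x720 at 30 fps.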
    tmp = ori_sequence.split('_')
    ori_width = int(tmp[-2].split('x')[0])
    ori_height = int(tmp[-2].split('x')[1].split('p')[0])
    ori_fps = int(tmp[-2].split('x')[1].split('p')[1])

    for seq_name in gp.seq_candidates[seq_idx]:
        tmp_seq_name = seq_name.split('_')
        width = int(tmp_seq_name[3].split('p')[0].split('x')[0])
        height = int(tmp_seq_name[3].split('p')[0].split('x')[1])
        fps = int(tmp_seq_name[3].split('p')[1])

        scale_seq_name = ori_sequence
        scale_width = ori_width
        scale_height = ori_height
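        # If the candidate aspect ratio differs from the source, first scale to an
        # intermediate size that matches the candidate aspect ratio while keeping
        # one of the original dimensions.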
        if width * ori_height != height * ori_width:
            if width * 1.0 / height < ori_width * 1.0 / ori_height:
                scale_width = ori_height * width / height
                scale_height = ori_height
            else:
                scale_width = ori_width
                scale_height = ori_width * height / width

            scale_seq_name = tmp[0] + '_' + tmp[1] + '_' + tmp[2] + '_' \
                             + str(scale_width) + 'x' + str(scale_height) + 'p' \
                             + str(ori_fps) + '_' + tmp[4]
            if not os.path.exists(sequence_dir + scale_seq_name):
                os_system(gp.scale.get_full_path_executable() + ' ' +
                          str(ori_width) + ' ' + str(ori_height) + ' ' +
                          str(scale_width) + ' ' + str(scale_height) + ' ' +
                          sequence_dir + ori_sequence + ' ' + sequence_dir +
                          scale_seq_name + ' 4 1')
                gp.print_log(gp.LogLevel.Normal, 'Scaling ' + scale_seq_name)

        if not os.path.exists(sequence_dir + seq_name):
            os_system(gp.scale.get_full_path_executable() + ' ' +
                      str(scale_width) + ' ' + str(scale_height) + ' ' +
                      str(width) + ' ' + str(height) + ' ' + sequence_dir +
                      scale_seq_name + ' ' + sequence_dir + seq_name + ' 3 ' +
                      str(ori_fps) + ' ' + str(fps))
            gp.print_log(gp.LogLevel.Normal, 'Scaling ' + seq_name)
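A minimal sketch of the intermediate-scale arithmetic above, using hypothetical numbers (a 1280x720p30 source and a 960x720p15 candidate); the names, values, and use of // are illustrative only:

ori_width, ori_height, ori_fps = 1280, 720, 30    # parsed from the source name
width, height, fps = 960, 720, 15                 # parsed from the candidate name

# Aspect ratios differ (960 * 720 != 720 * 1280) and 960/720 < 1280/720, so the
# width is reduced while the original height is kept:
scale_width = ori_height * width // height        # 720 * 960 // 720 = 960
scale_height = ori_height                         # 720
# The 960x720p30 intermediate is then resampled to the final 960x720 at 15 fps.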
Example #3
0
def copy_corresponding_log(case_name, scenario):
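    """
    Copy the current and reference logs of a problem case into
    <problem_case_dir>/<scenario>/<case_name>/cur and .../ref.
    """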
    case_dir = gp.generate_dir_path(gp.problem_case_dir, scenario, case_name)
    if os.path.exists(case_dir):
        return

    cur_case = find_corresponding_case_path(case_name, scenario,
                                            gp.cur_log_dir)
    dest_dir = gp.generate_dir_path(gp.problem_case_dir, scenario, case_name,
                                    'cur')
    gp.create_dir(dest_dir)
    os.system('cp -rf ' + cur_case + '* ' + dest_dir)

    ref_case = find_corresponding_case_path(case_name, scenario,
                                            gp.ref_log_dir)
    dest_dir = gp.generate_dir_path(gp.problem_case_dir, scenario, case_name,
                                    'ref')
    gp.create_dir(dest_dir)
    os.system('cp -rf ' + ref_case + '* ' + dest_dir)
Example #4
0
def set_up(log_folder_prefix):
    """
    tmp
    """
    if sys.argv[2] == 'save_log':
        gp.save_to_backup_dir = True
    else:
        gp.save_to_backup_dir = False

    gp.cur_commit_id = sys.argv[3][0:7]
    gp.commit_time = int(sys.argv[4])
    if len(sys.argv) == 5:
        gp.ref_commit_id = ao.find_latest_ref_log('')
    else:
        gp.ref_commit_id = ao.find_latest_ref_log(sys.argv[5][0:7])
    gp.cur_log_dir = log_folder_prefix + gp.folder_join + gp.cur_commit_id + gp.folder_join + str(
        gp.commit_time) + '/'
    gp.create_dir(gp.cur_log_dir)
Example #5
0
def saving_logs(scenario):
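    """
    Copy the selected encoder logs (and, in Overnight mode, the decoder/VQM
    logs) of every room/uid under the scenario result folder into the current
    or reference log directory, depending on the connection type.
    """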
    result_dir = gp.generate_dir_path(gp.result_dir, scenario)
    if gp.connection_type == gp.connection[0]:
        dst_dir = gp.generate_dir_path(gp.cur_log_dir, scenario)
        os.system('cp ' + gp.client.dir_ + 'commit.log ' + gp.cur_log_dir)
    else:
        dst_dir = gp.generate_dir_path(gp.ref_log_dir, scenario)
        os.system('cp ' + gp.client.dir_ + 'commit.log ' + gp.ref_log_dir)

    room_list = os.listdir(result_dir)

    for room in room_list:
        if os.path.isdir(result_dir + room):
            uid_list = os.listdir(result_dir + room)
            room_dir = gp.generate_dir_path(result_dir, room)
            for uid in uid_list:
                if os.path.isdir(room_dir + uid):
                    cur_uid_dir = gp.generate_dir_path(room_dir, uid)

                    doc_list = os.listdir(cur_uid_dir)
                    dst_uid_dir = gp.generate_dir_path(dst_dir, room, uid)
                    gp.create_dir(dst_uid_dir)
                    for doc in doc_list:
                        if re.search('enc_offline_test_0', doc) \
                                or re.search('enc_online_parameters', doc) \
                                or re.search('enc_save_sent_stream', doc) \
                                or re.search('enc_save_stream_info_0', doc):
                            os.system('cp ' + cur_uid_dir + doc + ' ' +
                                      dst_uid_dir)
                        if gp.mode == 'Overnight':
                            if re.search('dec_offline_test', doc) \
                                    or re.search('dec_save_stream_info', doc) \
                                    or re.search('dec_quality_score', doc) \
                                    or re.search('dec_save_stream_received', doc) \
                                    or re.search('vqmg', doc) \
                                    or re.search('crash', doc) \
                                    or re.search('timestamp', doc):
                                os.system('cp ' + cur_uid_dir + doc + ' ' +
                                          dst_uid_dir)
Example #6
0
def gen_dir(scenario):
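    """
    Create the result (and, if needed, reference log and backup) folders for
    the scenario, and point gp.client at the anchor/test/overnight executables
    according to the run mode and connection type.
    """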
    gp.print_log(gp.LogLevel.Normal, 'Generating directories...')
    gp.print_log(gp.LogLevel.Normal, 'Creating necessary folders...')
    result_scenario_dir = gp.generate_dir_path(gp.result_dir, scenario)
    gp.create_dir(result_scenario_dir)
    if gp.connection_type == gp.connection[1]:
        ref_log_scenario_dir = gp.generate_dir_path(gp.ref_log_dir, scenario)
        gp.create_dir(ref_log_scenario_dir)
    if gp.mode == 'Overnight' and not os.path.isdir(gp.backup_log_dir):
        gp.create_dir(gp.backup_log_dir)
    if gp.mode == 'Regression':
        if gp.connection_type == gp.connection[0]:
            gp.client.set_executable_dir(
                gp.generate_dir_path(gp.executable_dir, 'anchor'))
        else:
            gp.client.set_executable_dir(
                gp.generate_dir_path(gp.executable_dir, 'test'))
    else:
        gp.client.set_executable_dir(
            gp.generate_dir_path(gp.executable_dir, 'overnight'))
Example #7
0
def compare_data():
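    """
    Compare the current and reference logs scenario by scenario, write full
    and brief HTML reports into the problem-case folder, zip that folder into
    gp.problem_dir, and return the full HTML report.
    """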
    if gp.cur_log_dir[-1] != '/':
        gp.cur_log_dir += '/'
    if gp.ref_log_dir[-1] != '/':
        gp.ref_log_dir += '/'

    ref_list = os.listdir(gp.ref_log_dir)
    cur_list = os.listdir(gp.cur_log_dir)

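    # Log folder names follow '<time><folder_join><commit_id><folder_join>...'
    # (see set_up), so token 1 is the commit hash and token 0 the run time.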
    ref_folder = gp.ref_log_dir.split('/')[-2]
    cur_folder = gp.cur_log_dir.split('/')[-2]

    problem_zip_name = gp.cur_time + gp.folder_join + cur_folder.split(gp.folder_join)[1][0:7] \
                       + '_vs_' + ref_folder.split(gp.folder_join)[1][0:7]
    gp.problem_case_dir = gp.generate_dir_path(gp.data_dir, problem_zip_name)
    gp.create_dir(gp.problem_case_dir)
    if not os.path.isdir(gp.problem_dir):
        gp.create_dir(gp.problem_dir)

    header = '<h1>\nThis email contains the overnight comparison results\n</h1>'
    header += '<h2>\n Current Commit:\n</h2>' + gp.read_commit_log(gp.cur_log_dir) + \
              '<h3>Run on ' + gp.convert_date(cur_folder.split(gp.folder_join)[0]) + '</h3>'
    header += '<h2>\n Ref Commit:\n</h2>' + gp.read_commit_log(gp.ref_log_dir) + \
              '<h3>Run on ' + gp.convert_date(ref_folder.split(gp.folder_join)[0]) + '</h3>'
    if gp.total_crash != 0:
        header += '<h2>\n<span style="color: red"> Total Crash: ' + str(
            gp.total_crash) + '</span>\n</h2>'
    else:
        header += '<h2>\n No Crash.\n</h2>'

    result = ''
    # Brief version of the comparison result
    brief_result = ''

    for scenario in gp.scenario:
        result += '<hr>\n'
        found_case = True
        ref_enc_file = find_file('Enc_File_' + scenario, ref_list)
        ref_dec_file = find_file('Dec_File_' + scenario, ref_list)
        if ref_enc_file != '' and ref_dec_file != '':
            ref_case_set = cc.CaseSummary(gp.ref_log_dir + ref_enc_file,
                                          gp.ref_log_dir + ref_dec_file)
        else:
            found_case = False

        cur_enc_file = find_file('Enc_File_' + scenario, cur_list)
        cur_dec_file = find_file('Dec_File_' + scenario, cur_list)
        if cur_enc_file != '' and cur_dec_file != '':
            cur_case_set = cc.CaseSummary(gp.cur_log_dir + cur_enc_file,
                                          gp.cur_log_dir + cur_dec_file)
        else:
            found_case = False

        if found_case:
            result += compare_encoder_performance(cur_case_set, ref_case_set,
                                                  scenario)
            result += compare_decoder_performance(cur_case_set, ref_case_set,
                                                  scenario)
            brief_result += compare_encoder_performance_brief(scenario)
            brief_result += compare_decoder_performance_brief(scenario)

    if gp.total_mismatch != 0:
        header += '<h2>\n<span style="color: red"> Total Mismatch: ' + str(
            gp.total_mismatch) + '</span>\n</h2>'
    else:
        header += '<h2>\n No Mismatch.\n</h2>'

    output = '<html>\n<body>\n' + header + result + '</body>\n</html>\n'
    brief_output = '<html>\n<body>\n' + header + brief_result + '</body>\n</html>\n'

    result_file = cur_folder.split(
        gp.folder_join)[1][:7] + '_vs_' + ref_folder.split(
            gp.folder_join)[1][:7] + '.html'
    result_file_handle = open(gp.problem_case_dir + result_file, 'w')
    result_file_handle.write(output)
    result_file_handle.close()

    brief_result_file = cur_folder.split(
        gp.folder_join)[1][:7] + '_vs_' + ref_folder.split(
            gp.folder_join)[1][:7] + '_brief.html'
    brief_result_file_handle = open(gp.problem_case_dir + brief_result_file,
                                    'w')
    brief_result_file_handle.write(brief_output)
    brief_result_file_handle.close()

    gp.move_to_dir(gp.pic_dir, gp.problem_case_dir)
    gp.zip_to_folder(problem_zip_name + '.zip', gp.problem_case_dir,
                     gp.problem_dir)
    gp.remove_dir(gp.problem_case_dir)

    return output
Example #8
0
def compare_one_dec_case(cur_case, ref_case, scenario):
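    """
    Build one HTML table row per decoder client comparing the current run
    against the reference; draw per-client plots for failing cases and return
    '' when every metric stays within its bounds.
    """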
    content = ''
    if cur_case.get_client_number() <= 1:
        return content

    for idx in range(0, cur_case.get_client_number()):

        if idx == 0:
            cur_content = generate_cross_row_cell(cur_case.get_case(),
                                                  cur_case.get_client_number())
            fail = 0
        else:
            cur_content = ''

        cur_content += generate_one_cell(cur_case.get_config(idx))[0]

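        # The two trailing arguments to generate_cell are presumably the lower and
        # upper acceptance bounds on the cur-vs-ref ratio (0.95/1.05 = +/-5%);
        # 0, 0 appears to request an exact match (is_decodable, vqmg).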
        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_dec_data('real_fps')[idx],
                                cur_case.get_dec_data('real_fps')[idx]), 0.95,
                               1.05)
        gp.dec_comparison_class.add_one_comparison(
            scenario, 'real_fps',
            ref_case.get_dec_data('real_fps')[idx],
            cur_case.get_dec_data('real_fps')[idx])

        # (cur_content, fail) = generate_cell(cur_content, fail,
        #                                     (ref_case.get_enc_data('target_bitrate')[idx],
        #                                      cur_case.get_enc_data('target_bitrate')[idx]),
        #                                     0.95, 1.05)

        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_dec_data('real_bitrate')[idx],
                                cur_case.get_dec_data('real_bitrate')[idx]),
                               0.95, 1.05)
        gp.dec_comparison_class.add_one_comparison(
            scenario, 'real_bitrate',
            ref_case.get_dec_data('real_bitrate')[idx],
            cur_case.get_dec_data('real_bitrate')[idx])

        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_dec_data('PSNR')[idx],
                                cur_case.get_dec_data('PSNR')[idx]), 0.95,
                               1.05)
        gp.dec_comparison_class.add_one_comparison(
            scenario, 'PSNR',
            ref_case.get_dec_data('PSNR')[idx],
            cur_case.get_dec_data('PSNR')[idx])

        (cur_content,
         fail) = generate_cell(cur_content, fail, (ref_case.get_dec_data(
             'SSIM', 100)[idx], cur_case.get_dec_data('SSIM', 100)[idx]), 0.95,
                               1.05)
        gp.dec_comparison_class.add_one_comparison(
            scenario, 'SSIM',
            ref_case.get_dec_data('SSIM')[idx],
            cur_case.get_dec_data('SSIM')[idx])

        mismatch = fail
        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_dec_data('is_decodable')[idx],
                                cur_case.get_dec_data('is_decodable')[idx]), 0,
                               0)
        gp.dec_comparison_class.add_one_comparison(
            scenario, 'is_decodable',
            ref_case.get_dec_data('is_decodable')[idx],
            cur_case.get_dec_data('is_decodable')[idx])

        if fail > 0:
            ref_client_ = ref_case.client_[idx]
            cur_client_ = cur_case.client_[idx]
            cur_pic_dir = gp.pic_dir + cur_client_.get_case() + '_Decoder/'
            if gp.exists_dir(cur_pic_dir) == 0:
                gp.create_dir(cur_pic_dir)
            if len(ref_client_.decoded_client_) != len(
                    cur_client_.decoded_client_):
                continue
            for i in range(len(ref_client_.decoded_client_)):
                ref_dec = ref_client_.decoded_client_[i]
                cur_dec = cur_client_.decoded_client_[i]
                gp.drawOneDecoderClient(
                    cur_pic_dir,
                    cur_client_.get_case() + '_' + cur_client_.get_config(),
                    str(cur_dec.uid_), (cur_dec.raw_data_['real_fps'],
                                        ref_dec.raw_data_['real_fps']),
                    (cur_dec.raw_data_['real_bitrate'],
                     ref_dec.raw_data_['real_bitrate']),
                    (cur_dec.raw_data_['PSNR'], ref_dec.raw_data_['PSNR']),
                    (cur_dec.raw_data_['SSIM'], ref_dec.raw_data_['SSIM']))

        if fail > mismatch:
            gp.total_mismatch += 1

        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_dec_data('vqmg')[idx],
                                cur_case.get_dec_data('vqmg')[idx]), 0, 0)
        gp.dec_comparison_class.add_one_comparison(
            scenario, 'vqmg',
            ref_case.get_dec_data('vqmg')[idx],
            cur_case.get_dec_data('vqmg')[idx])

        content += '<tr align="center">' + cur_content + '</tr>\n'

    if fail > 0:
        return content
    else:
        return ''
Example #9
0
def compare_one_enc_case(cur_case, ref_case, scenario):
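    """
    Build one HTML table row per encoder client comparing the current run
    against the reference; draw per-client plots for failing cases and return
    '' when nothing exceeds the bounds.
    """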
    content = ''
    fail = 0

    for idx in range(0, cur_case.get_client_number()):
        if idx == 0:
            cur_content = generate_cross_row_cell(cur_case.get_case(),
                                                  cur_case.get_client_number())
            fail = 0
        else:
            cur_content = ''

        cur_content += generate_one_cell(cur_case.get_config(idx))[0]

        (cur_content, fail) = generate_cell(
            cur_content, fail,
            (ref_case.get_enc_data('target_purposed_ratio')[idx],
             cur_case.get_enc_data('target_purposed_ratio')[idx]), 0.7, 1.3,
            True)
        gp.enc_comparison_class.add_one_comparison(
            scenario, 'target_purposed_ratio',
            ref_case.get_enc_data('target_purposed_ratio')[idx],
            cur_case.get_enc_data('target_purposed_ratio')[idx])

        (cur_content, fail) = generate_cell(
            cur_content, fail,
            (ref_case.get_enc_data('unqualified_ratio')[idx],
             cur_case.get_enc_data('unqualified_ratio')[idx]), 0.0, 1.05, True)
        gp.enc_comparison_class.add_one_comparison(
            scenario, 'unqualified_ratio',
            ref_case.get_enc_data('unqualified_ratio')[idx],
            cur_case.get_enc_data('unqualified_ratio')[idx])

        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_enc_data('bitrate_diff')[idx],
                                cur_case.get_enc_data('bitrate_diff')[idx]),
                               0.00, 10, True)
        gp.enc_comparison_class.add_one_comparison(
            scenario, 'bitrate_diff',
            ref_case.get_enc_data('bitrate_diff')[idx],
            cur_case.get_enc_data('bitrate_diff')[idx])

        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_enc_data('real_fps')[idx],
                                cur_case.get_enc_data('real_fps')[idx]), 0.95,
                               1.05)
        gp.enc_comparison_class.add_one_comparison(
            scenario, 'real_fps',
            ref_case.get_enc_data('real_fps')[idx],
            cur_case.get_enc_data('real_fps')[idx])

        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_enc_data('PSNR')[idx],
                                cur_case.get_enc_data('PSNR')[idx]), 0.95,
                               1.05)
        gp.enc_comparison_class.add_one_comparison(
            scenario, 'PSNR',
            ref_case.get_enc_data('PSNR')[idx],
            cur_case.get_enc_data('PSNR')[idx])

        (cur_content,
         fail) = generate_cell(cur_content, fail, (ref_case.get_enc_data(
             'SSIM', 100)[idx], cur_case.get_enc_data('SSIM', 100)[idx]))
        gp.enc_comparison_class.add_one_comparison(
            scenario, 'SSIM',
            ref_case.get_enc_data('SSIM')[idx],
            cur_case.get_enc_data('SSIM')[idx])

        (cur_content,
         fail) = generate_cell(cur_content, fail,
                               (ref_case.get_enc_data('encoding_time')[idx],
                                cur_case.get_enc_data('encoding_time')[idx]))
        gp.enc_comparison_class.add_one_comparison(
            scenario, 'encoding_time',
            ref_case.get_enc_data('encoding_time')[idx],
            cur_case.get_enc_data('encoding_time')[idx])

        content += '<tr align="center">' + cur_content + '</tr>\n'

        if fail > 0:
            ref_client_ = ref_case.client_[idx]
            cur_client_ = cur_case.client_[idx]
            cur_pic_dir = gp.pic_dir + cur_client_.get_case() + '_Encoder/'
            if gp.exists_dir(cur_pic_dir) == 0:
                gp.create_dir(cur_pic_dir)
            gp.drawOneEncoderClient(
                cur_pic_dir,
                cur_client_.get_case() + '_' + cur_client_.get_config(),
                (cur_client_.raw_data_['target_bitrate'],
                 ref_client_.raw_data_['target_bitrate']),
                (cur_client_.raw_data_['real_bitrate'],
                 ref_client_.raw_data_['real_bitrate']),
                (cur_client_.raw_data_['real_fps'],
                 ref_client_.raw_data_['real_fps']),
                (cur_client_.raw_data_['PSNR'], ref_client_.raw_data_['PSNR']),
                (cur_client_.raw_data_['SSIM'], ref_client_.raw_data_['SSIM']))
    if fail > 0:
        return content
    else:
        return ''
Example #10
0
def run_one_scenario(scenario):
    """
    Run the offline tests (and, in Overnight mode, the VQM tests) for a single
    scenario: Comm, Live, or ScSh.
    """
    fun.gen_dir(scenario)
    fun.check_files()

    p = multiprocessing.Pool()

    gp.print_log(gp.LogLevel.Normal, 'Scaling YUVs...')
    for seqIdx in gp.sequences:
        temp = (seqIdx, )
        p.apply_async(fun.scale_yuv, temp)
        # fun.scale_yuv(seqIdx)
    p.close()
    p.join()
    gp.print_log(gp.LogLevel.Normal, '')

    gp.client_flag = gp.mgr.list(
        [gp.RunningState.Unfinished for _ in range(len(gp.clients))])
    p = multiprocessing.Pool()
    gp.print_log(gp.LogLevel.Normal, 'Start offline test...')

    while 1:
        # Find an unfinished case
        gp.process_lock.acquire()
        [all_start, all_finish, start_idx, end_idx] = fun.get_next_case()
        gp.process_lock.release()
        client_num = end_idx - start_idx

        if all_finish:
            break
        if all_start:
            time.sleep(1)
            continue

        require_capacity = 0
        for client_idx in range(start_idx, end_idx):
            client = gp.clients[client_idx]
            assert isinstance(client, fun.Client)
            require_capacity += client.capacity_

        # Block until there are enough free processors and capacity for this case
        while 1:
            gp.process_lock.acquire()
            if gp.running_process.value + client_num <= gp.active_process \
                    and gp.used_capacity.value + require_capacity <= gp.capacity:
                gp.running_process.value += client_num
                gp.used_capacity.value += require_capacity
                gp.process_lock.release()
                break
            elif gp.used_capacity.value == 0 and require_capacity > gp.capacity:
                gp.print_log(
                    gp.LogLevel.Normal,
                    "Not enough capacity to run this case: " +
                    gp.clients[start_idx].case_)
            gp.process_lock.release()
            time.sleep(1)

        if gp.connection_type == gp.connection[0] and os.popen(
                'pgrep ' + gp.server.get_executable_name()).read() == '':
            os.system(gp.server.get_full_path_executable() + ' >>/dev/null &')
            gp.print_log(
                gp.LogLevel.Normal, 'Restart Server! Current Server PID is ' +
                os.popen('pgrep ' + gp.server.get_executable_name()).read())

        for client_idx in range(start_idx, end_idx):
            gp.process_lock.acquire()
            gp.client_flag[client_idx] = gp.RunningState.Running
            gp.process_lock.release()
            client = gp.clients[client_idx]
            assert isinstance(client, fun.Client)
            uid_dir_name = str(client.uid_) + gp.string_join + client.config_

            client_dir = gp.generate_dir_path(gp.result_dir, scenario,
                                              client.case_, uid_dir_name)
            gp.create_dir(client_dir)
            fun.os_system('cp ' + gp.network_dir +
                          gp.networks[client.network_] + ' ' + client_dir)
            gp.client.copy_executable_to_dir(client_dir)

            if gp.connection_type == gp.connection[1]:
                log_dir = gp.generate_dir_path(gp.cur_log_dir, scenario,
                                               client.case_, uid_dir_name)
                fun.os_system('cp ' + log_dir + 'enc_online_parameters* ' +
                              client_dir)

            gp.print_log(
                gp.LogLevel.Normal, 'Running Case ' + client.case_ +
                ' Client with UID ' + str(client.uid_) + ', Duration ' +
                str(client.duration_) + ', Time ' + time.strftime(
                    '%Y_%m_%d_%H_%M_%S', time.localtime(time.time())))
            p.apply_async(fun.run_client,
                          args=(client_idx, client_dir, scenario))
            # fun.run_client(client_idx, client_dir, scenario)

        gp.print_log(gp.LogLevel.Normal, '')

    p.close()
    p.join()

    if gp.mode == 'Overnight':
        gp.client_flag = gp.mgr.list(
            [gp.RunningState.Unfinished for _ in range(len(gp.clients))])
        p = multiprocessing.Pool()
        gp.print_log(gp.LogLevel.Normal, 'Start VQM test...')

        while 1:
            # Find an unfinished case
            gp.process_lock.acquire()
            [all_start, all_finish, start_idx, end_idx] = fun.get_next_case()
            gp.process_lock.release()

            if all_finish:
                break
            if all_start:
                time.sleep(1)
                continue

            # Block until a processor slot is free (VQM uses at most half the processors)
            for client_idx in range(start_idx, end_idx):
                while 1:
                    gp.process_lock.acquire()
                    if gp.running_process.value < gp.active_process / 2:
                        gp.running_process.value += 1
                        gp.process_lock.release()
                        break
                    gp.process_lock.release()
                    time.sleep(1)

                gp.process_lock.acquire()
                gp.client_flag[client_idx] = gp.RunningState.Running
                gp.process_lock.release()

                client = gp.clients[client_idx]
                assert isinstance(client, fun.Client)
                uid_dir_name = str(
                    client.uid_) + gp.string_join + client.config_
                client_dir = gp.generate_dir_path(gp.result_dir, scenario,
                                                  client.case_, uid_dir_name)
                gp.vqm_test.copy_executable_to_dir(client_dir)
                gp.decode_stream.copy_executable_to_dir(client_dir)
                gp.scale.copy_executable_to_dir(client_dir)

                gp.print_log(
                    gp.LogLevel.Normal, 'Running VQM Case ' + client.case_ +
                    ' Client with UID ' + str(client.uid_) + ', Duration ' +
                    str(client.duration_) + ', Time ' + time.strftime(
                        '%Y_%m_%d_%H_%M_%S', time.localtime(time.time())))
                p.apply_async(fun.run_vqm, args=(client_idx, client_dir))
                # fun.run_vqm(client_idx, client_dir)

            gp.print_log(gp.LogLevel.Normal, '')

        p.close()
        p.join()

    fun.saving_logs(scenario)

    if gp.scenario_crash.value != 0:
        gp.print_log(
            gp.LogLevel.Normal, 'Total ' + str(gp.scenario_crash.value) +
            ' crashes for ' + scenario + '!!!')
        gp.total_crash += gp.scenario_crash.value
        gp.scenario_crash.value = 0
Example #11
0

if __name__ == '__main__':
    t = time.time()
    gp.print_log_filter = gp.LogLevel.Normal
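    # run = number of iterations of the main loop below; Overnight mode lowers it to 1.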
    run = 2
    if len(sys.argv) < 2:
        gp.print_log(gp.LogLevel.Normal, 'Not enough input!')
        exit()

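    # Third positional argument 0 opens the log file unbuffered (valid for
    # text mode in Python 2 only).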
    gp.log_file = open(sys.argv[1] + '.log', 'w', 0)

    if sys.argv[1] == 'Regression':
        gp.mode = 'Regression'
        gp.cur_log_dir = gp.data_dir + 'Anchor/'
        gp.create_dir(gp.cur_log_dir)
        gp.ref_log_dir = gp.data_dir + 'Test/'
        gp.create_dir(gp.ref_log_dir)
        if len(sys.argv) >= 4:
            gp.ref_commit_id = sys.argv[2][0:7]
            gp.cur_commit_id = sys.argv[3][0:7]
    elif sys.argv[1] == 'Overnight':
        gp.mode = 'Overnight'
        gp.backup_log_dir = gp.data_dir + 'Backups/'
        set_up(gp.data_dir + gp.cur_time)
        run = 1
    else:
        gp.print_log(gp.LogLevel.Normal, 'Invalid configuration!')
        exit()

    for iteration in range(0, run):