Example no. 1
def select(ast, utility_set):
    for ut in data_tools.get_utilities(ast):
        if ut not in utility_set:
            print('Utility currently not handled: {} - {}'.format(
                ut, data_tools.ast2command(ast, loose_constraints=True)))
            return False
    return True
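
A minimal usage sketch for select, assuming the same data_tools module is importable as in the snippet above; the sample commands and the allowed utility set are illustrative placeholders.

# Illustrative only: keep commands whose utilities are all in the allowed set.
allowed_utilities = {'find', 'grep', 'xargs'}
kept = []
for cmd in ['find . -name "*.py" | xargs grep -l TODO', 'tar -xzf archive.tar.gz']:
    ast = data_tools.bash_parser(cmd)
    if ast and select(ast, allowed_utilities):
        kept.append(cmd)
print(kept)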
Example no. 2
def compute_top_utilities(path, k):
    print('computing top most frequent utilities...')
    utilities = collections.defaultdict(int)
    with open(path) as f:
        while True:
            command = f.readline().strip()
            if not command:
                break
            ast = data_tools.bash_parser(command, verbose=False)
            for u in data_tools.get_utilities(ast):
                utilities[u] += 1
    top_utilities = []

    freq_threshold = -1
    for u, freq in sorted(utilities.items(), key=lambda x: x[1], reverse=True):
        if freq_threshold > 0 and freq < freq_threshold:
            break
        # Xingyu Wei: To update for word assignment and control flow,
        # we remove the Grey List
        if u in bash.BLACK_LIST:
            continue
        top_utilities.append(u)
        print('{}: {} ({})'.format(len(top_utilities), u, freq))
        if len(top_utilities) == k:
            freq_threshold = freq
    top_utilities = set(top_utilities)
    return top_utilities
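
A hypothetical invocation of compute_top_utilities; the corpus path and the cutoff k below are placeholders, and the input file is assumed to contain one bash command per line, as the function expects.

# Hypothetical invocation: keep the 100 most frequent utilities in the corpus
# (utilities tied with the 100th most frequent one are also retained).
top_utilities = compute_top_utilities('/path/to/all.cm', 100)
print('{} utilities selected'.format(len(top_utilities)))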
Example no. 3
def run():
    sqlite_filename = sys.argv[1]
    url_prefix = 'https://stackoverflow.com/questions/'

    urls = {}
    commands = {}

    with sqlite3.connect(sqlite_filename, detect_types=sqlite3.PARSE_DECLTYPES) as db:
        count = 0
        for post_id, answer_body in db.cursor().execute("""
                SELECT questions.Id, answers.Body FROM questions, answers
                WHERE questions.Id = answers.ParentId
                ORDER BY questions.Score DESC"""):
            print(post_id)
            for code_block in extract_code(answer_body):
                for cmd in extract_oneliner_from_code(code_block):
                    print('command string: {}'.format(cmd))
                    ast = data_tools.bash_parser(cmd)
                    if not ast:
                        continue
                    utilities = data_tools.get_utilities(ast)
                    for utility in utilities:
                        if utility in bash.top_100_utilities:
                            print('extracted: {}, {}'.format(utility, cmd))
                            temp = data_tools.ast2template(ast, loose_constraints=True)
                            if utility not in commands:
                                commands[utility] = {}
                                commands[utility][temp] = cmd
                                urls[utility] = {'{}{}'.format(url_prefix, post_id)}
                            else:
                                if len(commands[utility]) >= NUM_COMMAND_THRESHOLD:
                                    continue
                                if temp not in commands[utility]:
                                    commands[utility][temp] = cmd
                                    urls[utility].add('{}{}'.format(url_prefix, post_id))
            count += 1
            if count % 1000 == 0:
                # Every 1000 posts, check whether every top-100 utility has
                # collected enough commands; stop early once they all have.
                completed = True
                for utility in bash.top_100_utilities:
                    if utility not in commands or len(commands[utility]) < NUM_COMMAND_THRESHOLD:
                        completed = False
                    else:
                        print('{} collection done.'.format(utility))

                if completed:
                    break

    with open('stackoverflow.urls', 'wb') as o_f:
        pickle.dump(urls, o_f)
    with open('stackoverflow.commands', 'wb') as o_f:
        pickle.dump(commands, o_f)

    for utility in commands:
        print('{} ({})'.format(utility, len(commands[utility])))
        for cmd in commands[utility]:
            print(cmd)
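
A plausible entry point, assuming the function above lives in a script that receives the SQLite dump path as its first command-line argument; the script and file names here are placeholders.

if __name__ == '__main__':
    # e.g. python extract_stackoverflow.py stackoverflow-dump.sqlite
    run()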
Example no. 4
def get_u_hist_from_file(input_file):
    u_hist = collections.defaultdict(int)
    with open(input_file) as f:
        for cmd in f:
            ast = data_tools.bash_parser(cmd, verbose=False)
            for u in data_tools.get_utilities(ast):
                if u in bash.BLACK_LIST or u in bash.GREY_LIST:
                    continue
                u_hist[u] += 1
    return u_hist
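
A short sketch of how the histogram might be inspected, assuming the same module context; the input path is a placeholder.

# Hypothetical usage: print the utility histogram sorted by descending frequency.
u_hist = get_u_hist_from_file('/path/to/commands.txt')
for u, freq in sorted(u_hist.items(), key=lambda x: x[1], reverse=True):
    print('{}\t{}'.format(u, freq))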
Example no. 5
def populate_command_tags():
    for cmd in Command.objects.all():
        if len(cmd.str) > 600:
            cmd.delete()
        else:
            cmd.tags.clear()
            print(cmd.str)
            ast = data_tools.bash_parser(cmd.str)
            for utility in data_tools.get_utilities(ast):
                print(utility)
                cmd.tags.add(get_tag(utility))
            cmd.save()
Example no. 6
def get_command(command_str):
    command_str = command_str.strip()
    if Command.objects.filter(str=command_str).exists():
        cmd = Command.objects.get(str=command_str)
    else:
        cmd = Command.objects.create(str=command_str)
        ast = data_tools.bash_parser(command_str)
        for utility in data_tools.get_utilities(ast):
            cmd.tags.add(get_tag(utility))
        template = data_tools.ast2template(ast, loose_constraints=True)
        cmd.template = template
        cmd.save()
    return cmd
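
A hypothetical call from the same Django app; the command string is illustrative, and how the tags print depends on the Tag model's string representation.

# Illustrative only: fetch or create the Command record and inspect its tags.
cmd = get_command('find . -name "*.log" -delete')
print(cmd.template)
print(list(cmd.tags.all()))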
Example no. 7
def compute_flag_stats():
    input_file = sys.argv[1]
    train_file = sys.argv[2]

    u_hist = collections.defaultdict(int)
    with open(input_file) as f:
        for cmd in f:
            ast = data_tools.bash_parser(cmd, verbose=False)
            for u in data_tools.get_utilities(ast):
                if u in bash.BLACK_LIST or u in bash.GREY_LIST:
                    continue
                u_hist[u] += 1

    sorted_u_by_freq = sorted(u_hist.items(), key=lambda x: x[1], reverse=True)
    most_frequent_10 = [u for u, _ in sorted_u_by_freq[:10]]
    least_frequent_10 = [u for u, _ in sorted_u_by_freq[-10:]]

    most_frequent_10_flags = collections.defaultdict(set)
    least_frequent_10_flags = collections.defaultdict(set)
    with open(train_file) as f:
        for cmd in f:
            tokens = data_tools.bash_tokenizer(cmd,
                                               loose_constraints=True,
                                               with_flag_head=True)
            for token in tokens:
                if '@@' in token:
                    u, flag = token.split('@@')
                    if u in most_frequent_10:
                        most_frequent_10_flags[u].add(flag)
                    if u in least_frequent_10:
                        least_frequent_10_flags[u].add(flag)

    for u in most_frequent_10:
        if u in most_frequent_10_flags:
            print(u, data_tools.get_utility_statistics(u),
                  len(most_frequent_10_flags[u]))
        else:
            print(u, data_tools.get_utility_statistics(u), 0)
    print()
    for u in least_frequent_10:
        if u in least_frequent_10_flags:
            print(u, data_tools.get_utility_statistics(u),
                  len(least_frequent_10_flags[u]))
        else:
            print(u, data_tools.get_utility_statistics(u), 0)
Example no. 8
def u_hist_to_radar_chart():
    input_file = sys.argv[1]

    u_hist = collections.defaultdict(int)
    with open(input_file) as f:
        for cmd in f:
            ast = data_tools.bash_parser(cmd, verbose=False)
            for u in data_tools.get_utilities(ast):
                if u in bash.BLACK_LIST or u in bash.GREY_LIST:
                    continue
                u_hist[u] += 1

    selected_utilities = []
    for i, (u, freq) in enumerate(
            sorted(u_hist.items(), key=lambda x: x[1], reverse=True)):
        if i >= 50:
            break
        print('{{axis:"{}",value:{:.2f}}},'.format(u, freq))
        selected_utilities.append(u)
    print()
Example no. 9
def print_error_analysis_csv(grouped_dataset,
                             prediction_list,
                             FLAGS,
                             cached_evaluation_results=None,
                             group_by_utility=False,
                             error_predictions_only=True):
    """
    Convert dev/test set examples to csv format so as to make it easier for
    human annotators to enter their judgements.

    :param grouped_dataset: dev/test set grouped by natural language.
    :param prediction_list: model predictions.
    :param FLAGS: experiment hyperparameters.
    :param cached_evaluation_results: cached evaluation results from previous
        rounds.
    :param group_by_utility: if set, group the error examples by the utilities
        used in the ground truth.
    :param error_predictions_only: if set, only include examples whose top
        prediction contains an error.
    """
    def mark_example(error_list, example, gt_utility=None):
        if gt_utility:
            error_list[gt_utility].append(example)
        else:
            error_list.append(example)

    eval_bash = FLAGS.dataset.startswith("bash")
    cmd_parser = data_tools.bash_parser if eval_bash \
        else data_tools.paren_parser
    if group_by_utility:
        utility_index = {}
        for line in bash.utility_stats.split('\n'):
            ind, utility, _, _ = line.split(',')
            utility_index[utility] = ind

    grammar_errors = collections.defaultdict(list) if group_by_utility else []
    argument_errors = collections.defaultdict(list) if group_by_utility else []
    example_id = 0
    for nl_temp, data_group in grouped_dataset:
        sc_txt = data_group[0].sc_txt.strip()
        sc_temp = get_example_nl_key(sc_txt)
        tg_strs = [dp.tg_txt for dp in data_group]
        gt_trees = [cmd_parser(cm_str) for cm_str in tg_strs]
        if group_by_utility:
            gt_utilities = functools.reduce(
                lambda x, y: x | y,
                [data_tools.get_utilities(gt) for gt in gt_trees])
            gt_utility = sorted(list(gt_utilities),
                                key=lambda x: int(utility_index[x]))[-1]
        else:
            gt_utility = None
        predictions = prediction_list[example_id]
        example_id += 1
        example = []
        grammar_error, argument_error = False, False
        for i in range(min(3, len(predictions))):
            if i == 0:
                output_str = '{},"{}",'.format(example_id,
                                               sc_txt.replace('"', '""'))
            else:
                output_str = ',,'
            pred_cmd = predictions[i]
            tree = cmd_parser(pred_cmd)

            # evaluation ignoring flag orders
            temp_match = tree_dist.one_match(gt_trees,
                                             tree,
                                             ignore_arg_value=True)
            str_match = tree_dist.one_match(gt_trees,
                                            tree,
                                            ignore_arg_value=False)
            if i < len(tg_strs):
                output_str += '"{}",'.format(tg_strs[i].strip().replace(
                    '"', '""'))
            else:
                output_str += ','
            output_str += '"{}",'.format(pred_cmd.replace('"', '""'))
            if not str_match:
                if temp_match:
                    # Correct template but wrong arguments in the top prediction.
                    if i == 0:
                        argument_error = True
                else:
                    # Top prediction does not match the ground-truth template.
                    if i == 0:
                        grammar_error = True

            example_sig = '{}<NL_PREDICTION>{}'.format(sc_temp, pred_cmd)
            if cached_evaluation_results and \
                    example_sig in cached_evaluation_results:
                output_str += cached_evaluation_results[example_sig]
            else:
                if str_match:
                    output_str += 'y,y'
                elif temp_match:
                    output_str += 'y,'
            example.append(output_str)
        if error_predictions_only:
            if grammar_error:
                mark_example(grammar_errors, example, gt_utility)
            elif argument_error:
                mark_example(argument_errors, example, gt_utility)
        else:
            mark_example(grammar_errors, example, gt_utility)

    return grammar_errors, argument_errors
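
A hypothetical post-processing step showing how the returned error groups could be written out; the output file name is a placeholder, the header names are a guess at the column meanings based on how output_str is assembled above, and each entry appended to example is already a pre-escaped CSV row.

def write_error_csv(errors, path, group_by_utility=False):
    # Flatten the per-utility grouping if it was requested upstream.
    if group_by_utility:
        examples = [ex for group in errors.values() for ex in group]
    else:
        examples = errors
    with open(path, 'w') as o_f:
        o_f.write('example_id,description,ground_truth,prediction,'
                  'correct_template,correct_command\n')
        for example in examples:
            for row in example:
                o_f.write(row + '\n')

# e.g. write_error_csv(grammar_errors, 'grammar_errors.csv', group_by_utility=True)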