Example #1
0
def apply_preproc(*args):
    """Run every image in *args* through Preprocessing.

    Images for which ``preprocess_img`` returns ``None`` are dropped;
    the surviving preprocessed images are returned in input order.
    """
    from preprocessing.preprocessing import Preprocessing
    processor = Preprocessing()
    candidates = [processor.preprocess_img(image) for image in args]
    return [image for image in candidates if image is not None]
Example #2
0
    def __init__(self, pitch, color, our_side, video_port=0, comm_port='/dev/ttyUSB0', comms=1):
        """
        Entry point for the SDP system.

        Wires up the full pipeline: Arduino link, camera, vision,
        postprocessing, planner, GUI and the two robot controllers.

        Params:
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [string] color                  our team's color - 'yellow' or 'blue'
            [string] our_side               the side we're on - 'left' or 'right'
            [int] video_port                port number for the camera
            [string] comm_port              serial device path for the arduino
            [int] comms                     forwarded to Arduino; presumably toggles
                                            real communication - TODO confirm
        """
        # Fail fast on invalid configuration.
        # NOTE(review): asserts are stripped under `python -O`; explicit
        # raises would survive optimized runs.
        assert pitch in [0, 1]
        assert color in ['yellow', 'blue']
        assert our_side in ['left', 'right']

        self.pitch = pitch

        # Set up the Arduino communications (115200 baud; the third argument
        # is presumably a timeout - confirm against the Arduino class).
        self.arduino = Arduino(comm_port, 115200, 1, comms)

        # Set up camera for frames; one frame is grabbed immediately so the
        # vision system can be sized from its shape and adjusted center.
        self.camera = Camera(port=video_port, pitch=self.pitch)
        frame = self.camera.get_frame()
        center_point = self.camera.get_adjusted_center(frame)

        # Set up vision with the per-pitch color calibration.
        self.calibration = tools.get_colors(pitch)
        self.vision = Vision(
            pitch=pitch, color=color, our_side=our_side,
            frame_shape=frame.shape, frame_center=center_point,
            calibration=self.calibration)

        # Set up postprocessing for vision
        self.postprocessing = Postprocessing()

        # Set up main planner
        self.planner = Planner(our_side=our_side, pitch_num=self.pitch)

        # Set up GUI
        self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)

        self.color = color
        self.side = our_side

        self.preprocessing = Preprocessing()

        # Controllers for both robots; the docstring colors (RED attacker
        # power wire, YELLOW defender) come from the original authors.
        self.attacker = Attacker_Controller()
        self.defender = Defender_Controller()
Example #3
0
def main():
    """Pickle the extracted text of every file under ./test.

    Walks the directory hierarchy, extracts each file's text and writes it
    with pickle next to the source file (same path minus the extension).
    """
    import os

    preprocessing = Preprocessing()
    root_path = './test'  # before pickle hierarchy

    # Flatten the {directory: [files]} mapping into a single file list.
    dir_hierarchy = preprocessing.lookup_directory(root_path, {})
    file_list = []
    for tar_dir in dir_hierarchy:
        file_list += dir_hierarchy[tar_dir]

    for file_path in tqdm(file_list):
        text = extract_text(file_path)
        # splitext handles extensions of any length; the previous
        # file_path[:-4] assumed a 3-character extension like '.txt'.
        new_path = os.path.splitext(file_path)[0]
        with open(new_path, 'wb') as f:
            pickle.dump(text, f)
Example #4
0
#  print scalibration

# NOTE(review): `cam`, `frame` and `scalibration` are not defined in this
# chunk - presumably set up earlier in the script. `frame` is used here
# only for its shape/center and is re-read from the camera below.
vision = _Vision(pitch=0,
                 color='blue',
                 our_side='left',
                 frame_shape=frame.shape,
                 frame_center=cam.get_adjusted_center(frame),
                 calibration=scalibration)

# Set up postprocessing for oldvision

postprocessing = Postprocessing()

# NOTE(review): this rebinds the GUI class name to an instance, shadowing
# the class for the rest of the script.
GUI = GUI(calibration=scalibration, pitch=0)

preprocessing = Preprocessing()

# Grab a fresh frame and run whichever preprocessing steps are enabled.
frame = cam.get_frame()
pre_options = preprocessing.options
# Apply preprocessing methods toggled in the UI
preprocessed = preprocessing.run(frame, pre_options)
frame = preprocessed['frame']
# Show the background-subtraction debug view when that step produced one.
if 'background_sub' in preprocessed:
    cv2.imshow('bg sub', preprocessed['background_sub'])
    cv2.waitKey()

height, width, channels = frame.shape

# frame = frame[425:445, 5:25]

# Convert to HSV for color-based thresholding downstream.
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
Example #5
0
def _posix_like():
    """True when paths use '/' separators (macOS or Linux)."""
    return OSTYPE in ("Darwin", "Linux")


def _file_basename(input_file):
    """Return the final path component of input_file, OS-aware."""
    sep = "/" if _posix_like() else "\\"
    return input_file.split(sep)[-1]


def _file_tag(input_file):
    """Return '<parent-dir>_<file-name>' used in plot titles and file names."""
    if _posix_like():
        parts = input_file.split('/')
    else:
        # Tolerate mixed separators on Windows (the ensemble path already
        # did this; the single-method path now agrees).
        parts = input_file.split('/')[-1].split('\\')
    return parts[-2] + '_' + parts[-1]


def _store_score_stats(root_path, input_file, scores):
    """Pickle {basename: [max, min, max-min]} of scores to MaxMinDev_<case>."""
    case = os.listdir(root_path)[0]
    print(f"********** {case} store score ********")
    with open(f'MaxMinDev_{case}', 'wb') as file:  # OS dependency
        score_max = np.max(scores)
        score_min = np.min(scores)
        MaxMindict = defaultdict(list)
        MaxMindict[_file_basename(input_file)] = [
            score_max, score_min, score_max - score_min
        ]
        pickle.dump(MaxMindict, file)


def _plot_label_scores(dir_list, scores, input_file):
    """Bar-plot scores per candidate directory; save as label_score_<tag>.png."""
    global plot_number
    plt.figure(plot_number)
    plot_number += 1
    directory_name = [
        path.split('/')[-1].split('\\')[-1] for path in dir_list
    ]
    x = np.arange(len(scores))
    plt.title("Label Score of {}".format(_file_tag(input_file)))
    plt.bar(x, scores, color="blue")
    plt.xticks(x, directory_name)
    plt.savefig("label_score_{}.png".format(_file_tag(input_file)))
    plt.close(plot_number - 1)


def dropfile(input_file: str,
             root_path: str,
             cached_DTM=None,
             cached_vocab=None,
             cached_synonym_dict=None,
             verbose=True,
             preprocessing=None,
             scoring=None):
    """Pick the directory under root_path that best matches input_file.

    When both `preprocessing` and `scoring` name a known strategy, that
    single pair is scored; otherwise a fixed weighted ensemble of
    preprocessing/scoring combinations is run. Either way the label-score
    distribution is pickled (MaxMinDev_<case>) and plotted.

    Params:
        input_file           path of the file to classify
        root_path            root of the candidate directory hierarchy
        cached_DTM/vocab/
        cached_synonym_dict  optional {strategy-name: ...} caches; filled
                             in on the ensemble path
        verbose              print intermediate scores when True
        preprocessing        optional strategy name (see preprocessing_dict)
        scoring              optional scoring name (see scoring_dict)

    Returns:
        (dir_path, cached_DTM, cached_vocab, cached_synonym_dict), or
        bare None when a given strategy/scoring name is invalid.
    """
    os.environ['DROPFILE_LOGLEVEL'] = "1" if verbose else "0"

    preprocessing_dict = {
        "Preprocessing": Preprocessing(),
        "DependencyStructurePreprocessing": DependencyStructurePreprocessing(),
        "NounPhrasePreprocessing": NounPhrasePreprocessing(),
        "NounPreprocessing": NounPreprocessing(),
        "SpacyPreprocessing": SpacyPreprocessing(),
        "TargetWordChunkingPreprocessing": TargetWordChunkingPreprocessing(),
        "CFGPreprocessing": CFGPreprocessing()
    }
    scoring_dict = {
        "score_mse": score_mse,
        "score_cosine": score_cosine,
        "score_bayes": score_bayes,
        "score_CFG": score_CFG
    }

    # Initialize the caches up front: the single-method path below indexes
    # into them and used to crash with a TypeError when they were None
    # (the old None->dict init only ran on the ensemble path).
    if cached_DTM is None:
        cached_DTM = dict()
    if cached_vocab is None:
        cached_vocab = dict()
    if cached_synonym_dict is None:
        cached_synonym_dict = dict()

    if preprocessing is not None and scoring is not None:
        # -------- single preprocessing/scoring pair --------
        if preprocessing not in preprocessing_dict:
            print("Enter the valid preprocessing name")
            return
        if scoring not in scoring_dict:
            # Previously an unknown scoring name raised a raw KeyError.
            print("Enter the valid scoring name")
            return

        if (preprocessing in cached_DTM and preprocessing in cached_vocab
                and preprocessing in cached_synonym_dict):
            dir_list, label_score, _, _, _ = scoring_dict[scoring](
                input_file, root_path, preprocessing_dict[preprocessing],
                cached_DTM[preprocessing], cached_vocab[preprocessing],
                cached_synonym_dict[preprocessing])
        else:
            dir_list, label_score, _, _, _ = scoring_dict[scoring](
                input_file, root_path, preprocessing_dict[preprocessing],
                None, None, None)
        if verbose:
            print(label_score)

        # Normalize the raw label scores to a probability-like vector.
        score_arr = np.array(label_score).astype(float)
        score_arr = score_arr / sum(score_arr)
        dir_path = dir_list[score_arr.argmax()]

        _store_score_stats(root_path, input_file, score_arr)
        _plot_label_scores(dir_list, score_arr, input_file)
        return dir_path, cached_DTM, cached_vocab, cached_synonym_dict

    # -------- weighted ensemble of strategies --------
    ensembles = [
        {
            "preprocessing": "Preprocessing",
            "scoring": score_cosine,
            "weight": 1
        },
        {
            "preprocessing": "DependencyStructurePreprocessing",
            "scoring": score_cosine,
            "weight": 1
        },
        {
            "preprocessing": "NounPhrasePreprocessing",
            "scoring": score_cosine,
            "weight": 1
        },
        {
            "preprocessing": "NounPreprocessing",
            "scoring": score_cosine,
            "weight": 1
        },
        {
            "preprocessing": "Preprocessing",
            "scoring": score_bayes,
            "weight": 1
        },
        {
            "preprocessing": "SpacyPreprocessing",
            "scoring": score_cosine,
            "weight": 1
        },
        {
            "preprocessing": "Preprocessing",
            "scoring": score_mse,
            "weight": 1
        },
        {
            "preprocessing": "CFGPreprocessing",
            "scoring": score_CFG,
            "weight": 1
        },
    ]

    label_scores = []
    for method in ensembles:
        name = method["preprocessing"]
        if (name in cached_DTM and name in cached_vocab
                and name in cached_synonym_dict):
            dir_list, label_score, DTM, vocab, synonym_dict = method['scoring'](
                input_file, root_path, preprocessing_dict[name],
                cached_DTM[name], cached_vocab[name],
                cached_synonym_dict[name])
        else:
            dir_list, label_score, DTM, vocab, synonym_dict = method['scoring'](
                input_file, root_path, preprocessing_dict[name],
                None, None, None)
            # Remember the freshly built artifacts for later calls.
            cached_DTM[name] = DTM
            cached_vocab[name] = vocab
            cached_synonym_dict[name] = synonym_dict
        label_scores.append(label_score)

    # Normalize each method's scores, then combine with the ensemble weights.
    score_arr = np.array(label_scores)
    for i in range(score_arr.shape[0]):
        score_arr[i] = score_arr[i] / sum(score_arr[i])
    if verbose:
        print(score_arr)
    final_label_score = np.array([0.0] * score_arr.shape[1])
    for i in range(score_arr.shape[0]):
        final_label_score += score_arr[i] * ensembles[i]["weight"]

    _store_score_stats(root_path, input_file, final_label_score)

    print("Your OS is ", OSTYPE)
    _plot_label_scores(dir_list, final_label_score, input_file)

    try:
        dir_path = dir_list[final_label_score.argmax()]
    except (IndexError, ValueError):
        # Empty dir_list / empty score array: fall back to no directory.
        # (Was a bare `except:`, which also hid unrelated bugs.)
        dir_path = ''

    return dir_path, cached_DTM, cached_vocab, cached_synonym_dict
Example #6
0
def prepare_env(root_path: str, cached_tokens=None, verbose=False):
    """Precompute per-strategy artifacts for classifying files later.

    For every preprocessing strategy this builds the vocabulary, synonym
    dictionary and document-term matrix (DTM) of the files under
    root_path, reusing token lists from cached_tokens when available.

    Params:
        root_path      root of the directory hierarchy to index
        cached_tokens  optional {strategy-name: {file: tokens}} cache
        verbose        print per-step timings when True

    Returns:
        (DTM_dict, vocab_dict, synonym_dict_dict, tokens_dict), the first
        three keyed by preprocessing-strategy name.
    """
    os.environ['DROPFILE_LOGLEVEL'] = "1" if verbose else "0"

    preprocessing_dict = {
        "Preprocessing": Preprocessing(),
        "DependencyStructurePreprocessing": DependencyStructurePreprocessing(),
        "NounPhrasePreprocessing": NounPhrasePreprocessing(),
        "NounPreprocessing": NounPreprocessing(),
        "SpacyPreprocessing": SpacyPreprocessing(),
        "TargetWordChunkingPreprocessing": TargetWordChunkingPreprocessing(),
        "CFGPreprocessing": CFGPreprocessing()
    }
    normalpreprocessing = preprocessing_dict["Preprocessing"]

    DTM_dict = dict()
    vocab_dict = dict()
    synonym_dict_dict = dict()

    tokens_dict = defaultdict(dict) if cached_tokens is None else cached_tokens

    # Gather every file under root_path once.
    start = time.time()
    dir_hierarchy = normalpreprocessing.lookup_directory(
        root_path, defaultdict(list))
    file_list = list()
    for tar_dir in dir_hierarchy:
        file_list += dir_hierarchy[tar_dir]

    # Extract raw text for any file whose tokens are missing from at least
    # one strategy's cache.  (Checking only the "Preprocessing" cache, as
    # before, caused a KeyError below when another strategy's cache was
    # missing an entry for a file the "Preprocessing" cache had.)
    doc_dict = dict()
    for file in file_list:
        if any(file not in tokens_dict.get(name, {})
               for name in preprocessing_dict):
            doc_dict[file] = normalpreprocessing.file2text(file)
    if verbose:
        print(f"file2text takes {time.time() - start:.4f} s.")

    for name, preprocessing in preprocessing_dict.items():
        if verbose:
            print(f"{name} started")

        # Each strategy walks the hierarchy with its own lookup_directory.
        start = time.time()
        dir_hierarchy = preprocessing.lookup_directory(
            root_path, defaultdict(list))
        if verbose:
            print(f"{name}.lookup_directory takes {time.time()-start:.4f} s.")

        file_list = list()
        for tar_dir in dir_hierarchy:
            file_list += dir_hierarchy[tar_dir]

        # Tokenize every file, reusing cached tokens where available.
        # setdefault also covers cached_tokens being a plain dict that is
        # missing this strategy (the old tokens_dict[name][...] assignment
        # raised KeyError in that case).
        doc_list = list()
        start = time.time()
        cache = tokens_dict.setdefault(name, {})
        for file in file_list:
            if file in cache:
                tokens = cache[file]
            else:
                tokens = preprocessing.text2tok(doc_dict[file])
                cache[file] = tokens
            doc_list.append(tokens)
        if verbose:
            print(f"{name}.text2tok takes {time.time()-start:.4f} s.")

        # Build vocabulary, then the DTM of files under root_path.
        start = time.time()
        vocab, synonym_dict = preprocessing.build_vocab(doc_list)
        if verbose:
            print(f"{name}.build_vocab takes {time.time()-start:.4f} s.")
        start = time.time()
        DTM = preprocessing.build_DTM(doc_list, vocab, synonym_dict)
        if verbose:
            print(f"{name}.build_DTM takes {time.time()-start:.4f} s.")

        DTM_dict[name] = DTM
        vocab_dict[name] = vocab
        synonym_dict_dict[name] = synonym_dict

    return DTM_dict, vocab_dict, synonym_dict_dict, tokens_dict