Example #1
def gen_module(file, recreate):
    model_def = create_model_def(file)
    prefix = "./output/"
    module_name = model_def.name
    module_path = prefix + module_name

    if recreate and os.path.isdir(module_path):
        shutil.rmtree(module_path)

    if os.path.isdir(module_path):
        print("module folder", module_path, "already exists.")
        return

    io_utils.create_dir(module_path)
    db_path = module_path + "/database"
    seed_path = module_path + "/database/data"
    io_utils.create_dir(seed_path)

    copyfile("./templates/api.py", module_path + "/api.py")
    copyfile("./templates/utils.py", module_path + "/utils.py")

    ## copy seed files
    for path in io_utils.list_files("./seeds/" + module_name):
        copyfile(path, seed_path + "/" + os.path.basename(path))

    write_db_settings(model_def, db_path + "/base.py")
    for m in model_def.models:
        write_model(model_def, m)

    print("all done.")
Example #2
def run_re_by_pairs(news_iter, pairs_list_filepath, out_dir, settings,
                    parse_frames_in_news_sentences):
    assert (isinstance(news_iter, collections.Iterable))
    assert (isinstance(pairs_list_filepath, str))
    assert (isinstance(out_dir, str))
    assert (isinstance(settings, Settings))
    assert (isinstance(parse_frames_in_news_sentences, bool))

    create_dir(out_dir)

    # TODO. Refactor this.
    with open(pairs_list_filepath, 'r') as f:
        opinions = utils.read_opinions(
            filepath=pairs_list_filepath,
            custom_opin_ends_iter=lambda use_sentiment:
            OpinionStatisticBasePrinter.iter_opinion_end_values(
                f=f, read_sentiment=True),
            synonyms=settings.Synonyms)

    statistic_printer = OpinionStatisticBasePrinter(synonyms=settings.Synonyms)

    contexts_printer = ContextsPrinter(dir=out_dir, prefix="news_er_")

    pair_based = PairBasedTextProcessor(
        settings=settings,
        contexts_printer=contexts_printer,
        opinion_statistic_printer=statistic_printer,
        parse_frames_in_news_sentences=parse_frames_in_news_sentences,
        expected_opinions=opinions)

    for text_index, news_info in news_iter:
        pair_based.process_news_and_print(news_info=news_info,
                                          text_index=text_index)

    statistic_printer.save(filepath=path.join(out_dir, "stat.txt"))
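
For reference, the loop above unpacks `(text_index, news_info)` pairs from `news_iter`. A minimal sketch of an iterator with that shape follows; `make_news_iter` is a hypothetical helper, not part of the source:

def make_news_iter(news_infos):
    # Yield (text_index, news_info) pairs, matching the unpacking
    # in the loop inside run_re_by_pairs.
    for text_index, news_info in enumerate(news_infos):
        yield text_index, news_info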
Example #3
def get_output_root_task_new_part_folder(out_dir, task_name):
    assert (isinstance(out_dir, str))
    assert (isinstance(task_name, str))

    for part_index in range(100):

        folder = os.path.join(
            out_dir,
            "{task_name}-P{part}/".format(task_name=str(task_name),
                                          part=str(part_index).zfill(3)))

        if os.path.exists(folder):
            continue

        io_utils.create_dir(folder)

        return folder
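
A short usage sketch (paths and task name are made up): the function creates and returns the first "<task_name>-P<NNN>/" folder under `out_dir` that does not exist yet:

# First call creates and returns "./output/parse-P000/"; if that folder is
# kept around, the next call returns "./output/parse-P001/", and so on.
folder = get_output_root_task_new_part_folder(out_dir="./output", task_name="parse")
print(folder)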
Example #4
def run_re_diff(news_iter, pairs_list_filepath, out_dir, settings,
                parse_frames_in_news_sentences):
    assert (isinstance(news_iter, collections.Iterable))
    assert (isinstance(settings, Settings))
    assert (isinstance(pairs_list_filepath, str))
    assert (isinstance(out_dir, str))
    assert (isinstance(parse_frames_in_news_sentences, bool))

    create_dir(out_dir)

    # TODO. Refactor this.
    with open(pairs_list_filepath, 'r') as f:
        opinions = utils.read_opinions(
            filepath=pairs_list_filepath,
            custom_opin_ends_iter=lambda use_sentiment:
            OpinionStatisticBasePrinter.iter_opinion_end_values(
                f=f, read_sentiment=True),
            synonyms=settings.Synonyms)

    # Init printers
    statistic_printer = OpinionStatisticBasePrinter(synonyms=settings.Synonyms)
    contexts_printer = ContextsPrinter(dir=out_dir, prefix="diffstat_er_")
    diffstat_printer = DiffStatisticTitleOpinionsPrinter(
        filepath=path.join(out_dir, "diffstat.txt"),
        opinions=opinions,
        synonyms=settings.Synonyms)
    diffctx_printer = DiffContextsPrinter(directory=out_dir,
                                          filename="diff.txt")
    samectx_printer = DiffContextsPrinter(directory=out_dir,
                                          filename="same.txt")
    stat_objs_printer = StatisticObjectsPrinter(
        path.join(out_dir, "objs_stat.txt"))

    pair_based = PairBasedTextProcessor(
        settings=settings,
        contexts_printer=contexts_printer,
        opinion_statistic_printer=statistic_printer,
        expected_opinions=opinions,
        object_statistic_printer=stat_objs_printer,
        parse_frames_in_news_sentences=parse_frames_in_news_sentences)

    frame_based = FrameBasedTextProcessor(
        settings=settings,
        contexts_printer=contexts_printer,
        opinion_statistic_printer=statistic_printer,
        parse_frames_in_news_sentences=parse_frames_in_news_sentences)

    opinion_filtered = OpinionFilteredTextProcessor(
        opinion_dependent_processor=pair_based,
        frame_dependent_processor=frame_based,
        diff_stat=diffstat_printer,
        diff_ctx=diffctx_printer,
        same_ctx=samectx_printer)

    for text_index, news_info in news_iter:
        opinion_filtered.process_news(news_info=news_info,
                                      text_index=text_index)

    # Printing.
    diffstat_printer.print_statistic()
    statistic_printer.save(filepath=path.join(out_dir, "nonused.txt"))
    stat_objs_printer.save()
    # Exporting results.
    news_processed = 0
    added_words = set()
    f_name = "{}.txt".format(ner_type)

    # Init obj values extractor.
    ner_class_type = Default.get_class_by_ner_name(ner_type)
    text_object_authorizer = TextObjectAuthorizer(ner_type=ner_class_type)
    obj_values_extractor = TextObjectValuesExtractor(
        ner_cache=ner_cache,
        stemmer=Default.create_default_stemmer(),
        default_auth_check=lambda text_obj: text_object_authorizer.is_auth(
            text_obj))

    create_dir(output_dir)
    print("Output dir: {}".format(output_dir))

    with ner_cache:
        with open(join(output_dir, f_name), "w") as f:
            for _, news_info in reader.get_news_iter(source_dir):
                assert (isinstance(news_info, NewsInfo))

                if not ner_cache.is_news_registered(
                        news_id=news_info.FileName):
                    continue

                news_processed += 1

                for obj in obj_values_extractor.iter_for_news(
                        news_info=news_info):
Example #6
def detect(
    image_path: str,
    config: dict) -> Tuple[List[cv2.KeyPoint], np.ndarray, np.ndarray]:
    """Detects keypoints for a given input image.
    Draws keypoints into the image.
    Returns keypoints, heatmap and image with keypoints.

    Arguments:
        image_path {str} -- Path to the image.
        config {dict} -- General configuations. See config_run_detectors.py.

    Returns:
        Tuple[List[cv2.KeyPoint], np.array, (np.array | None) ] -- Returns list
        of cv2.KeyPoint, an image with the corresponding keypoints, and if
        available, an heatmap.

    1) Create temporary folder `tmp` to save intermediate output.
    2a) Load and smart scale the image
    2b) Save the resulting image in `tmp`.
    3a) Subprocess call to TILDE for keypoints. Save output in `tmp`
    4a) Load keypoints from 'tmp' and convert keypoints to cv2.Keypoints.
    4b) Draw list of cv2.KeyPoints into image.
    5) Return KeyPoint list and image with keypoints.
    """

    # 1)
    io_utils.create_dir(config['tmp_dir_tilde'])

    # 2a)
    img = cv2.imread(image_path, 0)
    if config['max_size'] is not None:
        img = io_utils.smart_scale(img, config['max_size'],
                                   prevent_upscaling=config['prevent_upscaling'])

    # 2b)
    tmp_filename = 'tmp_img.png'
    tmp_keypoints = 'keypoints.csv'
    tmp_heatmap = 'heatmap.csv'

    path_tmp_img = os.path.join(config['tmp_dir_tilde'], tmp_filename)
    path_tmp_kpts = os.path.join(config['tmp_dir_tilde'], tmp_keypoints)
    path_tmp_heatmap = os.path.join(config['tmp_dir_tilde'], tmp_heatmap)

    cv2.imwrite(path_tmp_img, img)

    # 3a)
    imageDir = config['tmp_dir_tilde']
    outputDir = config['tmp_dir_tilde']
    fileName = tmp_filename
    filterPath = '/home/tilde/TILDE/c++/Lib/filters'
    filterName = 'Mexico.txt'

    # Call use_tilde.cpp
    # The output will be saved into
    # - config['tmp_dir_tilde']/keypoints.csv and
    # - config['tmp_dir_tilde']/heatmap.csv
    subprocess.check_call([
        './use_tilde',
        '--imageDir', imageDir,
        '--outputDir', outputDir,
        '--fileName', fileName,
        '--filterPath', filterPath,
        '--filterName', filterName])

    # 4)
    kpts_file = np.loadtxt(path_tmp_kpts, dtype=int, comments='#', delimiter=', ')

    max_num_keypoints = config['max_num_keypoints']
    if max_num_keypoints:
        kpts_file = kpts_file[:max_num_keypoints]

    kpts = [cv2.KeyPoint(x[0], x[1], _size=1) for x in kpts_file]
    heatmap = np.loadtxt(path_tmp_heatmap, dtype=float, comments='# ', delimiter=', ')
    img_kp = io_utils.draw_keypoints(img, kpts, config)

    return (kpts, img_kp, heatmap)
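
A hypothetical call to detect(); only the config keys are taken from the function body above, the values and the image path are made up, and the `./use_tilde` binary plus the TILDE filters must be available:

config = {
    'tmp_dir_tilde': './tmp/tilde',   # temporary folder for intermediate output
    'max_size': 1200,                 # smart-scale larger images down to this size
    'prevent_upscaling': True,
    'max_num_keypoints': 500,         # keep only the first 500 keypoints
}
kpts, img_kp, heatmap = detect('example.png', config)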
Example #7
def main(args):
    # Get necessary directories
    output_dir = args.output_dir
    image_dir = args.image_dir
    data_dir = args.data_dir
    tensorflow_dir = os.path.join(data_dir, args.tensorflow_dir)
    stats_dir = os.path.join(data_dir, args.stats_dir)

    train_name = args.train_name
    stats_name = args.stats_name
    save_feature_name = args.save_feature

    # Create list of file names.
    file_list = args.file_list.split(' ')

    # Parameters
    alpha = args.alpha
    patch_size = args.patch_size
    batch_size = args.batch_size
    descriptor_dim = args.descriptor_dim

    stats_path = os.path.join(stats_dir, 'stats_{}.pkl'.format(stats_name))

    print('Loading training stats:')
    with open(stats_path, 'rb') as src:
        mean, std = pickle.load(src, encoding='utf-8')
    print('Training data loaded:\nMean: {}\nStd: {}'.format(mean, std))

    CNNConfig = {
        'patch_size': patch_size,
        'descriptor_dim': descriptor_dim,
        'batch_size': batch_size,
        'alpha': alpha,
        'train_flag': False
    }

    cnn_model = patch_cnn.PatchCNN(CNNConfig)

    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        try:
            saver.restore(
                sess,
                os.path.join(tensorflow_dir,
                             '{}_model.ckpt'.format(train_name)))
            # saver.restore(sess, '../tensorflow_model/'+train_name+'_model.ckpt')
            print('Model restored.')
        except Exception:
            print('No model found. Exit.')
            exit()

        for file_path in file_list:
            collection_name, set_name, file_base, extension = io_utils.get_path_components(
                file_path)
            file_name = '{}.{}'.format(file_base, extension)

            save_dir = os.path.join(output_dir, save_feature_name,
                                    collection_name, set_name)
            io_utils.create_dir(
                save_dir)  # Create folder if it does not exist

            output_list = []
            if extension in ['.ppm', '.pgm', '.png', '.jpg']:
                save_file = file_base + '.mat'
                save_name = os.path.join(save_dir, save_file)

                # Read image
                img, ratio = read_image_from_name(file_path)
                if img.shape[2] == 1:
                    img = np.repeat(img, 3, axis=2)

                # Build image pyramid
                pyramid = pyramid_gaussian(img,
                                           max_layer=4,
                                           downscale=np.sqrt(2))

                # Predict transformation
                for (j, resized) in enumerate(pyramid):
                    fetch = {'o1': cnn_model.o1}

                    resized = np.asarray(resized)
                    resized = (resized - mean) / std
                    resized = resized.reshape(
                        (1, resized.shape[0], resized.shape[1],
                         resized.shape[2]))

                    result = sess.run(fetch,
                                      feed_dict={cnn_model.patch: resized})
                    result_mat = result['o1'].reshape(
                        (result['o1'].shape[1], result['o1'].shape[2],
                         result['o1'].shape[3]))
                    output_list.append(result_mat)

                sio.savemat(save_name, {'output_list': output_list})
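
main() only reads attributes from `args`; a minimal argparse sketch providing exactly those attributes could look like the following. Only the flag names are taken from the code above; defaults, types, and requiredness are assumptions:

import argparse

def build_parser():
    # Flags mirror the attributes read from `args` in main() above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir', required=True)
    parser.add_argument('--image_dir', required=True)
    parser.add_argument('--data_dir', required=True)
    parser.add_argument('--tensorflow_dir', default='tensorflow')
    parser.add_argument('--stats_dir', default='stats')
    parser.add_argument('--train_name', required=True)
    parser.add_argument('--stats_name', required=True)
    parser.add_argument('--save_feature', default='covariant_points')
    parser.add_argument('--file_list', required=True,
                        help='Space-separated list of image paths.')
    parser.add_argument('--alpha', type=float, default=1.0)
    parser.add_argument('--patch_size', type=int, default=32)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--descriptor_dim', type=int, default=128)
    return parser

# main(build_parser().parse_args())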
Example #8
def detect_bulk(file_list: List[str], config: Dict) -> None:
    """Computes keypoints for all files in `file_list`. Additionally for each
    file, create an image with the corresponding keypoints drawn into it.
    All results will be saved within the `tmp` folder for this module.

    Arguments:
        file_list {List[str]} -- List of all images for which to compute keypoints.
        config {Dict} -- General configuration object. See config_run_detectors.py
        for more information.

    Returns:
        None -- All results here are saved within the `tmp` dir specified within
        the `config` object.
    """

    try:
        # Create feature map for each image in 'covariant_points' folder
        subprocess.check_call([
            'python',
            'patch_network_point_test.py',
            '--save_feature',
            'covariant_points',
            '--output_dir',
            config['tmp_dir_tcovdet'],
            '--file_list',
            ' '.join(file_list),
        ])

    except Exception as e:
        print('TCovDet: Covariant feature map creation failed.')
        print(e)
        raise e

    try:
        collection_names = []
        set_names = []
        image_names = []
        for file_path in file_list:
            collection_name, set_name, file_base, extension = io_utils.get_path_components(
                file_path)
            collection_names.append(collection_name)
            set_names.append(set_name)
            image_names.append(file_base)

        # The path to this file
        matlab_config_path = os.path.join(config['tmp_dir_tcovdet'],
                                          'filelist.mat')
        # Where to save the keypoints
        dir_output = os.path.join(config['tmp_dir_tcovdet'], 'feature_points')
        io_utils.create_dir(dir_output)  # Create on the fly if not existent

        # Where the .mat files of the covariant step lie
        dir_data = os.path.join(config['tmp_dir_tcovdet'], 'covariant_points')

        # Set the maximal number of keypoints to find
        point_number = 1000 if config['max_num_keypoints'] is None else config[
            'max_num_keypoints']

        savemat(
            matlab_config_path, {
                'file_list': file_list,
                'collection_names': collection_names,
                'set_names': set_names,
                'image_names': image_names,
                'dir_output': dir_output,
                'dir_data': dir_data,
                'point_number': point_number
            })
        subprocess.check_call([
            'matlab', '-nosplash', '-r',
            "point_extractor('vlfeat-0.9.21', '{}');quit".format(
                matlab_config_path)
        ])

    except Exception as e:
        print('TCovDet: Keypoint feature map creation failed.')
        print(e)
        raise e

    # Load each created .mat file, extract the keypoints (Column 2 and 5),
    # create list of cv2.KeyPoint.
    # Then load the image, scale it, draw the keypoints in it and save everything
    for i in tqdm(range(len(file_list))):
        file = file_list[i]
        mat_path = os.path.join(config['tmp_dir_tcovdet'], 'feature_points',
                                collection_names[i], set_names[i],
                                image_names[i] + '.mat')
        mat = loadmat(mat_path)
        kpts_numpy = mat['feature'][:, [2, 5]]  # numpy array of (x, y) pairs
        scores = mat['score']

        if len(kpts_numpy):
            kpts_cv2 = [
                cv2.KeyPoint(x[0], x[1], 1.0, _response=scores[idx])
                for idx, x in enumerate(kpts_numpy)
            ]  # list of cv2.KeyPoint

            img = cv2.imread(file, 0)
            if (img.shape[0] * img.shape[1]) > (1024 * 768):
                ratio = (1024 * 768 /
                         float(img.shape[0] * img.shape[1]))**(0.5)
                img = cv2.resize(
                    img,
                    (int(img.shape[1] * ratio), int(img.shape[0] * ratio)),
                    interpolation=cv2.INTER_CUBIC)

            img_kp = io_utils.draw_keypoints(img, kpts_cv2, config)

            # Save everything.
            io_utils.save_detector_output(file, config['detector_name'],
                                          config, kpts_cv2, img_kp, None)
        else:
            print('Warning: Did not find any keypoints!')
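
A hypothetical invocation of detect_bulk(); only the config keys read directly in the function above are grounded (`tmp_dir_tcovdet`, `max_num_keypoints`, `detector_name`), everything else is made up, and both `patch_network_point_test.py` and the MATLAB `point_extractor` script must be reachable from the working directory:

config = {
    'tmp_dir_tcovdet': './tmp/tcovdet',
    'max_num_keypoints': None,   # fall back to the default of 1000 points
    'detector_name': 'tcovdet',
}
detect_bulk(['./images/set_a/img_001.png', './images/set_a/img_002.png'], config)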